[
  {
    "path": ".gitignore",
    "content": ".DS_Store"
  },
  {
    "path": "LICENSE",
    "content": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU General Public License is a free, copyleft license for\nsoftware and other kinds of works.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nthe GNU General Public License is intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.  We, the Free Software Foundation, use the\nGNU General Public License for most of our software; it applies also to\nany other work released this way by its authors.  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  To protect your rights, we need to prevent others from denying you\nthese rights or asking you to surrender the rights.  Therefore, you have\ncertain responsibilities if you distribute copies of the software, or if\nyou modify it: responsibilities to respect the freedom of others.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must pass on to the recipients the same\nfreedoms that you received.  You must make sure that they, too, receive\nor can get the source code.  
And you must show them these terms so they\nknow their rights.\n\n  Developers that use the GNU GPL protect your rights with two steps:\n(1) assert copyright on the software, and (2) offer you this License\ngiving you legal permission to copy, distribute and/or modify it.\n\n  For the developers' and authors' protection, the GPL clearly explains\nthat there is no warranty for this free software.  For both users' and\nauthors' sake, the GPL requires that modified versions be marked as\nchanged, so that their problems will not be attributed erroneously to\nauthors of previous versions.\n\n  Some devices are designed to deny users access to install or run\nmodified versions of the software inside them, although the manufacturer\ncan do so.  This is fundamentally incompatible with the aim of\nprotecting users' freedom to change the software.  The systematic\npattern of such abuse occurs in the area of products for individuals to\nuse, which is precisely where it is most unacceptable.  Therefore, we\nhave designed this version of the GPL to prohibit the practice for those\nproducts.  If such problems arise substantially in other domains, we\nstand ready to extend this provision to those domains in future versions\nof the GPL, as needed to protect the freedom of users.\n\n  Finally, every program is threatened constantly by software patents.\nStates should not allow patents to restrict development and use of\nsoftware on general-purpose computers, but in those that do, we wish to\navoid the special danger that patents applied to a free program could\nmake it effectively proprietary.  To prevent this, the GPL assures that\npatents cannot be used to render the program non-free.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. 
Definitions.\n\n  \"This License\" refers to version 3 of the GNU General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. 
Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. 
Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  
This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. 
Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  
If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  
A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  
Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  
You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  
If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  
If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  
For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  
To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Use with the GNU Affero General Public License.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU Affero General Public License into a single\ncombined work, and to convey the resulting work.  
The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the special requirements of the GNU Affero General Public License,\nsection 13, concerning interaction through a network will apply to the\ncombination as such.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  
It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n    <program>  Copyright (C) <year>  <name of author>\n    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  
Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<http://www.gnu.org/licenses/>.\n\n  The GNU General Public License does not permit incorporating your program\ninto proprietary programs.  If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.  But first, please read\n<http://www.gnu.org/philosophy/why-not-lgpl.html>."
  },
  {
    "path": "README.md",
    "content": "# JMeter ec2 Script\n\nThis shell script will allow you to run your local JMeter jmx files either using Amazon's EC2 service or you can provide it with a simple, comma-delimited list of hosts to use. Summary results are printed to the console as the script runs and then all result data is downloaded and concatenated to one file when the test completes ready for more detailed analysis offline.\n\nBy default it will launch the required hardware using Amazon EC2. Using AWS it is much easier and cheaper to scale your test over multiple slaves but if you need to you can also pass in a list of pre-prepared hostnames and the test load will be distributed over these instead. Using your own servers can be useful when the target server to be tested can not be easily accessed from a location external to your test network or you want to repeat a test iteratively.\n\nThe script does not use JMeter's Distributed Mode so you do not need to adjust the test parameters to ensure even distribution of the load; the script will automatically adjust the thread counts based on how many hosts are in use. As the test is running it will collate the results from each host in real time and display an output of the Generate Summary Results listener to the screen (showing both results host by host and an aggregated view for the entire run). 
Once execution is complete it will download each host's jtl file and collate them all together to give a single jtl file that can be viewed using the usual JMeter listeners.\n\n<img width=\"1254\" alt=\"jmeter-ec2-screenshot-1\" src=\"https://cloud.githubusercontent.com/assets/1336821/14234911/df4385bc-f9e6-11e5-96fa-37230e40a670.png\">\n\n<img width=\"1252\" alt=\"jmeter-ec2-screenshot-2\" src=\"https://cloud.githubusercontent.com/assets/1336821/14234913/e4a7e516-f9e6-11e5-95a3-1152a54e46ea.png\">\n\n## Getting Started\n### Prerequisites\n* An Amazon ec2 account is required (unless valid hosts are specified using REMOTE_HOSTS property).\n* [AWS CLI](https://aws.amazon.com/cli/) must be installed. See the  [userguide](http://docs.aws.amazon.com/cli/latest/userguide/) for setup information.\n* Testplans must contain a [Generate Summary Results Listener](https://jmeter.apache.org/usermanual/component_reference.html#Generate_Summary_Results). No other listeners are required.\n\n### Setup\n 1. Create a project directory on your machine. For example: `~/Documents/WHERETOPUTMYSTUFF/`. This is where you store your testplan and any associated files.\n 2. Download or clone all files from this repo into a suitable directory (e.g. `/usr/local/`).\n 3. Extract the file `example-project.zip` into `~/Documents/WHERETOPUTMYSTUFF/`. You now have a template / example directory structure for your project.\n 4. Edit the file jmeter-ec2.properties as below:\n\n  `INSTANCE_SECURITYGROUP=\"sg-123456\"`\n  The ID of your security group (or groups) created under your Amazon account. It must allow Port 22 to the local machine running this script.\n\n  `PEM_FILE=\"euwest1\"`\n  Your Amazon key file.\n\n  `PEM_PATH=\"/Users/oliver/.ec2\"`\n  The directory (not the full filepath) where the Amazon PEM file is located. **Important**: No trailing '/'!\n\n 5. Copy your JMeter jmx file into the /jmx directory under your root project directory (Ie. 
myproject) and rename it to the same name as the directory. For example, if you created the directory `/testing/myproject` then you should name the jmx file `myproject.jmx`.\n 6. Copy any data files that are required by your testplan to the /data sub directory.\n 7. Copy any jar files that are required by your testplan to the /plugins sub directory.\n 8. Open a terminal window and cd to the project directory you created (eg. cd /home/username/someproject).\n 9. Type: `count=\"1\" ./path/to/jmeter-ec2.sh`\n Where '1' is the number of instances you wish to spread the test over. If you have provided a list of hosts using `REMOTE_HOSTS` then this value is ignored and all hosts in the list will be used.\n\n\n### Advanced Usage\n    percent=20 count=\"3\" terminate=\"TRUE\" setup=\"TRUE\" env=\"UAT\" release=\"3.23\" comment=\"my notes\" ./jmeter-ec2.sh'\n\n    [count]           - optional, default=1\n    [percent]         - optional, default=100. Should be in the format 1-100 where 20 => 20% of threads will be run by the script.\n    [setup]           - optional, default=TRUE. Set to \"FALSE\" if a pre-defined host is being used that has already been setup (had files copied to it, jmeter installed, etc.)\n    [terminate]       - optional, default=TRUE. Set to \"FALSE\" if the instances created should not be terminated.\n    [price]           - optional, if specified spot instances will be requested at this price\n\n### Advanced Properties\n\n  `AMI_ID=\"[A linix based AMI]\"`\n  Recommended AMIs are provided in the jmeter-ec2.properties file. Both Java and JMeter are installed by the script dynamically if not present.\n\n  `INSTANCE_TYPE=\"m3.medium\"`\n  `micro` type instances do work and are good for developing but they are not recommended for important test runs. Performance can be slow and you risk affecting test results.\n  Note: Older generation instance types require a different type of AMI (paravirtual vs. 
hmv).\n\n  `USER=\"ubuntu\"`\n  Different AMIs start with different basic users. This value could be 'ec2-user', 'root', 'admin' etc.\n\n  `SUBNET_ID=\"\"`\n  The id of the subnet that the instance will belong to. So long as a [default VPC](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html) exists for your account you do not need to set this.\n\n  `RUNNINGTOTAL_INTERVAL=\"3\"`\n  How often running totals are printed to the screen. Based on a count of the summariser.interval property. (If the Generate Summary Results listener is set to wait 10 seconds then every 30 (3 * 10) seconds an extra row showing an aggregated summary will be printed.) The summariser.interval property in the standard jmeter.properties file defaults to 180 seconds - in the file included with this project it is set to 15 seconds, like this we default to summary updates every 45 seconds.\n\n  `REMOTE_HOSTS=\"\"`\n  If you do not wish to use ec2 you can provide a comma-separated list of pre-defined hosts.\n\n  `REMOTE_PORT=\"\"`\n  Specify the port sshd is running on for `REMOTE_HOSTS` or ec2. Default 22.\n\n  `ELASTIC_IPS=\"\"`\n  If using ec2, then you can also provide a comma-separated list of pre-defined elastic IPs. 
This is useful if your test needs to pass through a firewall.\n\n  `JMETER_VERSION=\"apache-jmeter-2.13\"`\n  Allows the version to be chosen dynamically.\n\n### Limitations:\n* JMeter V3 is not tested with this script.\n* There are [limits imposed by Amazon](http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ec2) on how many instances can be run in a new account - the default is 20 instances as of Oct 2011.\n* You cannot have jmeter variables in the testplan field `Thread Count`, this value must be numeric.\n* Testplan file paths cannot be dynamic, any jmeter variables in the filepath will be ignored.\n\n### Why am I seeing `copying install.sh to 1 server(s)...lost connection`?\nThis happens when it is not possible for the script to connect over port 22 to the instance that was created by AWS. There are a number of reasons why this can happen.\n\n**First, can you telnet to the instance?**\nRun the script to create a box but use:\n\n`count=\"1\" terminate=\"FALSE\"./path/to/jmeter-ec2.sh`\n\nThen, take the hostname of the instance just created and try:\n\n`telnet thehostname.com 22`\n\nIf you see something like:\n\n> Trying thehostname.com...\nConnected to thehostname.com\nEscape character is '^]'.\nSSH-2.0-OpenSSH_6.6p1 Ubuntu-2ubuntu1\n\nThen you **DO** have network access.\n\nIf you see:\n\n> Trying 123.456.789.123...\n\nYou **DO NOT** have network access.\n\n#### Things to try if you **DO** have network access\n\n**File permissions on your PEM file**\nYour .pem files [need to be secure](http://stackoverflow.com/questions/1454629/aws-ssh-access-permission-denied-publickey-issue). Use `chmod 600 yourfile.pem`.\n\n**The `USER` property is not correct**\nDifferent AMIs and OSs expect you to log in using different users. Make sure this value is set correctly.\n\n**Install the latest version of the ec2-api-tools**\nCheck [here](http://aws.amazon.com/developertools/351/) and make sure you have the latest version installed. 
Use `$ ec2-version` to check.\n\n#### Things to try if you **DO NOT** have network access\n\n**Your Security Group is not configured properly**\nThe `INSTANCE_SECURITYGROUP_IDS` property needs to reference the exact ids of one or more security group that exists in the correct region and that contains a rule that allows inbound traffic on port 22 from the machine you are running the script from, or everywhere if you are running the script remotely or just want to rule this out (be sure to reduce this scope later once you've got things working)\n\n**Check local network settings**\nOften port 22 can be blocked by over-zealous local network security settings. You often see this with poor quality wifi services, the type where you have to fill out a marketing form to get access. You can sometimes get around this by using a vpn but often they block this too and then your only choice is to put down your flat white and leave.\n\n\n## Spot instances\n\nBy default this shell script uses on-demand instances. You can use spot instances by requesting an hourly `price` for your EC2 instances.\n\n### Usage:\n`count=\"3\" price=0.0035  ./jmeter-ec2.sh'`\n\n> Spot Instances allow you to name your own price for Amazon EC2 computing capacity. You simply bid on spare Amazon EC2\n> instances and run them whenever your bid exceeds the current Spot Price, which varies in real-time based on supply\n> and demand. 
The Spot Instance pricing model complements the On-Demand and Reserved Instance pricing models,\n> providing potentially the most cost-effective option for obtaining compute capacity, depending on your application.\n\nRead more at http://aws.amazon.com/ec2/purchasing-options/spot-instances/\n\n\n\n    [price]           - optional, if specified spot instances will be requested at this price\n    [count]           - optional, default=1\n\n\n### Notes\nIf your price is too low spot requests will fail with a status ``` price-too-low ```.\n\nTo get the price history by instance type, use the ```ec2-describe-spot-price-history``` command from [AWS CLI](http://aws.amazon.com/cli/) :\n\nFor example to get current price for t1.micro instance running Linux :\n\n```ec2-describe-spot-price-history -H --instance-type t1.micro -d Linux/UNIX -s `date +\"%Y-%m-%dT%H:%M:%SZ\"````\n\n\n## Running locally with Vagrant\n[Vagrant](http://vagrantup.com) allows you to test your jmeter-ec2 scripts locally before pushing them to ec2.\n\n### Prerequisites\n* [Vagrant](http://vagrantup.com)\n\n### Usage:\nUse `jmeter-ec2.properties.vagrant` as a template for local provisioning. This file is set up to use Vagrant's ssh key, ports, etc.\n```\n# backup your properties files just in case\ncp jmeter-ec2.properties jmeter-ec2.properties.bak\n# use the vagrant properties file\ncp jmeter-ec2.properties.vagrant jmeter-ec2.properties\n# start vm and provision defaultjre\nvagrant up\n# run your project\nproject=\"myproject\" setup=\"TRUE\" ./jmeter-ec2.sh\n```\n\n### Note\n* You may need to edit the `Vagrantfile` to meet any specific networking needs. See Vagrant's [networking documentation](http://docs.vagrantup.com/v2/getting-started/networking.html) for details.\n\n## General Notes:\n### AWS Key Pairs\nTo find your key pairs go to your ec2 dashboard -> Networking and Security -> Key Pairs. 
Make sure this key pair is in the REGION you also set in the properties file.\n\n### AWS Security Groups\nTo create or check your EC2 security groups go to your ec2 dashboard -> security groups.\n\nCreate a security group (e.g. called jmeter) that allows inbound access on port 22 from the IP of the machine where you are running the script.\n\n### Using AWS\nIt is not uncommon for an instance to fail to start, this is part of using the Cloud and for that reason this script will dynamically respond to this event by adjusting the number of instances that are used for the test. For example, if you request 10 instances but 1 fails then the test will be run using only 9 machines. This should not be a problem as the load will still be evenly spread and the end results (the throughput) identical. In a similar fashion, should Amazon not provide all the instances you asked for (each account is limited) then the script will also adjust to this scenario.\n\n### Using Jmeter\nAny testplan should always have suitable pacing to regulate throughput. This script distributes load based on threads, it is assumed that these threads are setup with suitable timers. If not, adding more hardware could create unpredictable results.\n\n\n## License\nJMeter-ec2 is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nJMeter-ec2 is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with JMeter-ec2.  If not, see <http://www.gnu.org/licenses/>.\n\n\n\nThe source repository is at:\n  [https://github.com/oliverlloyd/jmeter-ec2](https://github.com/oliverlloyd/jmeter-ec2)\n"
  },
  {
    "path": "Vagrantfile",
    "content": "# -*- mode: ruby -*-\n# vi: set ft=ruby :\n\nVagrant.configure(\"2\") do |config|\n  config.vm.box = \"quantal64\"\n  config.vm.box_url = \"https://github.com/downloads/roderik/VagrantQuantal64Box/quantal64.box\"\n\n  # Forward jmeter's command port for shutdown.\n  config.vm.network :forwarded_port, guest: 4445, host: 4445\n\n  # Create a private network, which allows host-only access to the machine\n  # using a specific IP.\n  # config.vm.network :private_network, ip: \"192.168.33.10\"\n\n  # Create a public network, which generally matched to bridged network.\n  # Bridged networks make the machine appear as another physical device on\n  # your network.\n  # config.vm.network :public_network\n\n  # jmeter-ec2.sh requires java installed when using REMTOE_HOSTS\n  config.vm.provision :shell do |shell|\n    shell.inline = \"sudo apt-get update\"\n    shell.inline = \"sudo apt-get -y install default-jre\"\n  end\nend\n"
  },
  {
    "path": "jmeter",
    "content": "#! /bin/sh\n\n##   Licensed to the Apache Software Foundation (ASF) under one or more\n##   contributor license agreements.  See the NOTICE file distributed with\n##   this work for additional information regarding copyright ownership.\n##   The ASF licenses this file to You under the Apache License, Version 2.0\n##   (the \"License\"); you may not use this file except in compliance with\n##   the License.  You may obtain a copy of the License at\n## \n##       http://www.apache.org/licenses/LICENSE-2.0\n## \n##   Unless required by applicable law or agreed to in writing, software\n##   distributed under the License is distributed on an \"AS IS\" BASIS,\n##   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n##   See the License for the specific language governing permissions and\n##   limitations under the License.\n\n##   ==============================================\n##   Environment variables:\n##   JVM_ARGS - optional java args, e.g. -Dprop=val\n##\n##   ==============================================\n\n\n# The following should be reasonably good values for most tests running\n# on Sun JVMs. Following is the analysis on which it is based. If it's total\n# gibberish to you, please study my article at\n# http://www.atg.com/portal/myatg/developer?paf_dm=full&paf_gear_id=1100010&detailArticle=true&id=9606\n#\n# JMeter objects can generally be grouped into three life-length groups:\n#\n# - Per-sample objects (results, DOMs,...). An awful lot of those.\n#   Life length of milliseconds to a few seconds.\n#\n# - Per-run objects (threads, listener data structures,...). 
Not that many \n#   of those unless we use the table or tree listeners on heavy runs.\n#   Life length of minutes to several hours, from creation to start of next run.\n#\n# - Per-work-session objects (test plans, GUIs,...).\n#   Life length: for the life of the JVM.\n\n# This is the base heap size -- you may increase or decrease it to fit your\n# system's memory availablity:\nHEAP=\"-Xms2048m -Xmx2048m\"\n\n# There's an awful lot of per-sample objects allocated during test run, so we\n# need a large eden to avoid too frequent scavenges -- you'll need to tune this\n# down proportionally if you reduce the HEAP values above:\nNEW=\"-XX:NewSize=256m -XX:MaxNewSize=256m\"\n\n# This ratio and target have been proven OK in tests with a specially high\n# amount of per-sample objects (the HtmlParserHTMLParser tests):\n# SURVIVOR=\"-XX:SurvivorRatio=8 -XX:TargetSurvivorRatio=50%\"\n\n# Think about it: trying to keep per-run objects in tenuring definitely\n# represents a cost, but where's the benefit? 
They won't disappear before\n# the test is over, and at that point we will no longer care about performance.\n#\n# So we will have JMeter do an explicit Full GC before starting a test run,\n# but then we won't make any effort (or spend any CPU) to keep objects\n# in tenuring longer than the life of per-sample objects -- which is hopefully\n# shorter than the period between two scavenges):\n#\nTENURING=\"-XX:MaxTenuringThreshold=2\"\n\n# This evacuation ratio is OK (see the comments for SURVIVOR) during test\n# runs -- not so sure about operations that bring a lot of long-lived information into\n# memory in a short period of time, such as loading tests or listener data files.\n# Increase it if you experience OutOfMemory problems during those operations\n# without having gone through a lot of Full GC-ing just before the OOM:\n# EVACUATION=\"-XX:MaxLiveObjectEvacuationRatio=20%\"\n\n# Avoid the RMI-induced Full GCs to run too frequently -- once every ten minutes\n# should be more than enough:\nRMIGC=\"-Dsun.rmi.dgc.client.gcInterval=600000 -Dsun.rmi.dgc.server.gcInterval=600000\"\n\n# Increase MaxPermSize if you use a lot of Javascript in your Test Plan :\nPERM=\"-XX:PermSize=64m -XX:MaxPermSize=128m\"\n\n# Finally, some tracing to help in case things go astray:\n#DEBUG=\"-verbose:gc -XX:+PrintTenuringDistribution\"\n\n# Always dump on OOM (does not cost anything unless triggered)\nDUMP=\"-XX:+HeapDumpOnOutOfMemoryError\"\n\nSERVER=\"-server\"\n\nARGS=\"$SERVER $DUMP $HEAP $NEW $SURVIVOR $TENURING $EVACUATION $RMIGC $PERM\"\n\n# Added to counter ELB DNS caching - see: http://wiki.apache.org/jmeter/JMeterAndAmazon\nJVM_ARGS=\"-Dsun.net.inetaddr.ttl=0\"\n\njava $ARGS $JVM_ARGS -jar `dirname $0`/ApacheJMeter.jar \"$@\"\n"
  },
  {
    "path": "jmeter-ec2.properties",
    "content": "#!/bin/bash\n\n# This is a java stye properties file for the jmeter-ec2 shell script\n#\n# It is treated like a normal shell script\n#\n# See README.txt for more details about each property\n#\n\n# Pre Installed AMIs (Dont forget to use the right AMI for your region.)\n#\n# Region          OS          AMI id          Name\n# eu-west-1       Ubuntu\t    ami-ae72f2dd    Ireland\n# ap-south-1      ubuntu      ami-2312664c    Mumbai\n# ap-southeast-1  Ubuntu      ami-aacf1ac9    Singapore\n# ap-northeast-1  Ubuntu      ami-f050439e    Tokyo\n# ap-northeast-2  Ubuntu      ami-48a07426    Seoul\n# ap-southeast-2  Ubuntu      ami-73735110    Sydney\n# eu-central-1    Ubuntu      ami-7c759413    Frankfurt\n# sa-east-1       Ubuntu      ami-6f038c03    Sao Paulo\n# us-east-1       Ubuntu      ami-90d2c4fa    N. Virginia\n# us-west-1       Ubuntu      ami-10473b70    N.California\n# us-west-2       Ubuntu      ami-caf501aa    Oregan\n#\nAMI_ID=\"ami-ae72f2dd\"\n\n# Should match the AMI\n# IMPORTANT - t2.micro is not recommend for anything beyond developement, it works from a shared resource pool that can fluctuate, skewing test results.\nINSTANCE_TYPE=\"t2.micro\"\n\n# Do not change\nREMOTE_HOME=\"/home/ubuntu\"\n\n# The name OR id of *your* security group in *your* Amazon account - the permissions for thius group need to give your local machine ssh access.\nINSTANCE_SECURITYGROUP_IDS=\"sg-48a8b32c\"\n\n# The name of the Amazon Keypair that you want to use. It should exist in *your* AWS account for the region you are using.\nAMAZON_KEYPAIR_NAME=\"euwest1\"\n\n# The full name of the pem file you downloaded from your Amazon account. 
Usualy .pem from AWS but you could generate your own and name it what you want.\nPEM_FILE=\"euwest1.pem\"\n\n# The path to your pem file\nPEM_PATH=$HOME/.ssh\n\n# Should match the AMI\nUSER=\"ubuntu\"\n\n# Email to be used when tagging instances\nEMAIL=\"\"\n\n# Specify the region you will be working in\nREGION=\"eu-west-1\"\n\n# How often the script prints running totals to the screen (n * summariser.interval seconds)\nRUNNINGTOTAL_INTERVAL=\"3\"\n\n# A list of static IPs that can be assigned to each ec2 host. Ignored if not set\nELASTIC_IPS=\"\"\n\n# The port number sshd is running on\nREMOTE_PORT=\"22\"\n\n# The version of JMeter to be used. Must be the full name used in the dir structure. Does not work for versions prior to 2.5.1\nJMETER_VERSION=\"apache-jmeter-2.13\"\n\n\n#\n# EC2-VPC Usage\n# \n# jmeter-ec2 can configure EC2-VPC instances. You must:\n#   - set SUBNET_ID to the id of the subnet that the instance will belong to\n#   - make sure that the AMI is compatible with EC2-VPC\n#   - enable DNS Resolution in the VPC\n#   - enable DNS hostnames in the VPC\n# \nSUBNET_ID=\"\"                 # The subnet that the instance will belong to.\n\n# Remote hosts\n#\n# If this is set then the script will ignore INSTANCE_COUNT passed in at the command line and read in this list of hostnames to run the test over\n# instead. If it is not set then n number of hosts will be requested from Amazon.\n#\n# Must be a comma-separated list, like this:\n# REMOTE_HOSTS=\"ec2-46-51-135-180.eu-west-1.compute.amazonaws.com,ec2-176-34-204-10.eu-west-1.compute.amazonaws.com\"\n# or:\n# REMOTE_HOSTS=\"myhost.com,antherhost.com\"\n# or:\n# REMOTE_HOSTS=\"blahblah.corp.synergy:2020,10.213.45.6\"\n"
  },
  {
    "path": "jmeter-ec2.properties.vagrant",
    "content": "# #!/bin/bash\n\n# This is a java stye properties file for the jmeter-ec2 shell script\n#\n# It is treated like a normal shell script\n#\n# See README.txt for more details about each property\n#\nLOCAL_HOME=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"      # The root for this script - all files should be put here as per the README\nREMOTE_HOME=\"/tmp\"                          # This can be left as /tmp - it is a temporary working location\nPEM_FILE=\"insecure_private_key\"                    # The full name of the pem file you downloaded from your Amazon account. Usualy .pem from AWS but you could generate your own and name it what you want.\nPEM_PATH=\"$HOME/.vagrant.d\"               # The path to your pem file\nUSER=\"vagrant\"                               # Should match the AMI\nREMOTE_PORT=\"2222\"                            # The port number sshd is running on,\nRUNNINGTOTAL_INTERVAL=\"3\"                   # How often the script prints running totals to the screen (n * summariser.interval seconds)\nJMETER_VERSION=\"apache-jmeter-2.7\"          # The version of JMeter to be used. Must be the full name used in the dir structure. Does not work for versions prior to 2.5.1.\nRUNNINGTOTAL_INTERVAL=\"3\"                   # How often the script prints running totals to the screen (n * summariser.interval seconds)\n\n# REMOTE_HOSTS\nREMOTE_HOSTS=\"127.0.0.1\"\n"
  },
  {
    "path": "jmeter-ec2.sh",
    "content": "#!/bin/bash\n\n# ========================================================================================\n# jmeter-ec2.sh\n# https://github.com/oliverlloyd/jmeter-ec2\n# ========================================================================================\n#\n# Copyright 2012 - Oliver Lloyd - GNU GENERAL PUBLIC LICENSE\n#\n# JMeter-ec2 is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# JMeter-ec2 is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with JMeter-ec2.  If not, see <http://www.gnu.org/licenses/>.\n#\n\nDATETIME=$(date \"+%s\")\n\n# First make sure we have the required params and if not print out an instructive message\n#if [ -z \"$project\" ] ; then\nif [ \"$1\" == \"-h\" ] ; then\n\techo 'usage: project=\"abc\" percent=20 setup=\"TRUE\" terminate=\"TRUE\" count=\"3\" ./jmeter-ec2.sh'\n\techo\n\techo \"[project]         -\trequired, directory and jmx name\"\n\techo \"[count]           -\toptional, default=1\"\n\techo \"[percent]         -\toptional, default=100\"\n\techo \"[setup]           -\toptional, default='TRUE'\"\n\techo \"[terminate]       -\toptional, default='TRUE'\"\n  echo \"[price]           - optional\"\n\techo\n\texit\nfi\n\n# default to 100 if percent is not specified\nif [ -z \"$percent\" ] ; then percent=100 ; fi\n\n# default to TRUE if setup is not specified\nif [ -z \"$setup\" ] ; then setup=\"TRUE\" ; fi\n\n# default to TRUE if terminate is not specified\nif [ -z \"$terminate\" ] ; then terminate=\"TRUE\" ; fi\n\n# move count to instance_count\nif [ -z \"$count\" ] ; then 
count=1 ; fi\ninstance_count=$count\n\nLOCAL_HOME=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\n\n# Execute the jmeter-ec2.properties file, establishing these constants.\n. $LOCAL_HOME/jmeter-ec2.properties\n\nif [ -z \"$project\" ] ; then\n       project=$(basename `pwd`)\nfi\nproject_home=`pwd`\n\n# If exists then run a local version of the properties file to allow project customisations.\nif [ -f \"$project_home/jmeter-ec2.properties\" ] ; then\n\t. $project_home/jmeter-ec2.properties\nfi\n\ncd $EC2_HOME\n\n# check project directory exists\nif [ ! -d \"$project_home\" ] ; then\n  echo \"The directory $project_home does not exist.\"\n  echo\n  echo \"Script exiting.\"\n  exit\nfi\n\n# The test has not started yet (used to decide what to do when the script stops)\nteststarted=0\n\n# do some basic checks to prevent problems later\nfunction check_prereqs() {\n\t# If there is a custom jmeter.properties, check for:\n\t# - jmeter.save.saveservice.output_format=csv\n\t# - jmeter.save.saveservice.thread_counts=true\n\tif [ -r $LOCAL_HOME/jmeter.properties ] ; then\n    has_csv_output=$(grep -c \"^\\s*jmeter.save.saveservice.output_format=csv\"  $LOCAL_HOME/jmeter.properties)\n    has_thread_counts=$(grep -c \"^\\s*jmeter.save.saveservice.thread_counts=true\" $LOCAL_HOME/jmeter.properties)\n\t  if [ $has_csv_output -eq \"0\" ] ; then\n\t\t  echo \"WARN: Please ensure the jmeter.properties file has 'jmeter.save.saveservice.output_format=csv'. Could not find it!\"\n\t  fi\n\t  if [ $has_thread_counts -eq \"0\" ] ; then\n\t\t  echo \"WARN: Please ensure the jmeter.properties file has 'jmeter.save.saveservice.thread_counts=true'. Could not find it!\"\n\t  fi\n\telse\n\t  echo \"WARN: Did not see a custom jmeter.properties file. 
#######################################
# Provision the load-generator hosts, prepare and upload the test artefacts,
# and launch JMeter on every host in non-GUI mode.
#
# If REMOTE_HOSTS is set, those hosts are used as-is (after an ssh liveness
# check). Otherwise instances are requested from AWS — on-demand, or spot when
# $price is set — then waited on until their status checks pass. Optionally
# assigns ELASTIC_IPS, copies/runs verify.sh (when $setup=TRUE), rewrites the
# jmx (remote file paths, per-host thread counts distributed round-robin) and
# starts jmeter.sh on each host in the background.
#
# Globals (read): REMOTE_HOSTS ELASTIC_IPS instance_count project project_home
#   AMAZON_KEYPAIR_NAME AMI_ID INSTANCE_TYPE INSTANCE_SECURITYGROUP_IDS
#   SUBNET_ID price REGION terminate setup PEM_PATH PEM_FILE USER REMOTE_PORT
#   REMOTE_HOME LOCAL_HOME DATETIME JMETER_VERSION percent
# Globals (written): hosts instanceids instance_count working_jmx temp_jmx
# Outputs: progress messages to stdout; per-host *.out files in $project_home
# Returns: exits the script on any unrecoverable provisioning failure
#######################################
function runsetup() {
  # if REMOTE_HOSTS is not set then no hosts have been specified to run the test on so we will request them from Amazon
  if [ -z "$REMOTE_HOSTS" ] ; then
    # check if ELASTIC_IPS is set, if it is we need to make sure we have enough of them
    if [ ! -z "$ELASTIC_IPS" ] ; then # Not Null - same as -n
      elasticips=(`echo $ELASTIC_IPS | tr "," "\n" | tr -d ' '`)
      elasticips_count=${#elasticips[@]}
      if [ "$instance_count" -gt "$elasticips_count" ] ; then
        echo
        echo "You are trying to launch $instance_count instance(s) but you have only specified $elasticips_count elastic IPs."
        echo "If you wish to use Static IPs for each test instance then you must increase the list of values given for ELASTIC_IPS in the properties file."
        echo
        echo "Alternatively, if you set the ELASTIC_IPS property to \"\" or do not specify it at all then the test will run without trying to assign static IPs."
        echo
        echo "Script exiting..."
        echo
        exit
      fi
    fi

    # default to 1 instance if a count is not specified
    if [ -z "$instance_count" ] ; then instance_count=1; fi

    echo
    echo "   -------------------------------------------------------------------------------------"
    echo "       jmeter-ec2 Automation Script - Running $project.jmx over $instance_count AWS Instance(s)"
    echo "   -------------------------------------------------------------------------------------"
    echo
    echo

    # JSON launch specification used only for spot requests (run-instances takes discrete flags)
    vpcsettings=""
    spot_launch_specification="{
      \"KeyName\": \"$AMAZON_KEYPAIR_NAME\",
      \"ImageId\": \"$AMI_ID\",
      \"InstanceType\": \"$INSTANCE_TYPE\" ,
      \"SecurityGroupIds\": [\"$INSTANCE_SECURITYGROUP_IDS\"]
    }"

    # if subnet is specified, target that subnet and request a public IP for both launch styles
    if [ -n "$SUBNET_ID" ] ; then
      vpcsettings="--subnet-id $SUBNET_ID --associate-public-ip-address"
      spot_launch_specification="{
        \"KeyName\": \"$AMAZON_KEYPAIR_NAME\",
        \"ImageId\": \"$AMI_ID\",
        \"InstanceType\": \"$INSTANCE_TYPE\" ,
        \"SecurityGroupIds\": [\"$INSTANCE_SECURITYGROUP_IDS\"],
        \"SubnetId\": \"$SUBNET_ID\"
      }"
    fi

    # create the instance(s) and capture the instance id(s)
    if [ -z "$price" ] ; then
      # on-demand path: ask for up to $instance_count instances (min 1) in one call
      echo -n "Requesting $instance_count instance(s)..."
      attempted_instanceids=(`aws ec2 run-instances \
                  --key-name "$AMAZON_KEYPAIR_NAME" \
                  --instance-type "$INSTANCE_TYPE" \
                  --security-group-ids "$INSTANCE_SECURITYGROUP_IDS" \
                  --count 1:$instance_count \
                  $vpcsettings \
                  --image-id $AMI_ID \
                  --region $REGION \
                  --output text --query 'Instances[].InstanceId'`)
    else
      echo "Using Spot instances..."
      # create the spot instance request(s) and capture the request id(s)
      echo "Requesting $instance_count instance(s)..."

      spot_instance_request_id=(`aws ec2 request-spot-instances \
                  --spot-price $price \
                  --instance-count $instance_count \
                  --region $REGION \
                  --launch-specification "$spot_launch_specification" \
                  --output text --query 'SpotInstanceRequests[].[SpotInstanceRequestId]'`)
      echo "Spot Instance request submitted, number of requests is: ${#spot_instance_request_id[@]}"

      # poll the requests until every one is fulfilled, a terminal error is seen
      # on all of them, or we give up after status_check_limit polls
      status_check_count=0
      status_check_limit=60
      spot_request_fulfilled_count=0
      spot_request_error_count=0
      echo "Waiting for Spot instance requests to fulfill (may take a few minutes)"
      while [ "$spot_request_fulfilled_count" -ne "$instance_count" ] && [ $status_check_count -lt $status_check_limit ]
      do
        spot_request_statuses=(`aws ec2 describe-spot-instance-requests --spot-instance-request-ids ${spot_instance_request_id[@]} --region $REGION --output text --query 'SpotInstanceRequests[].[Status.Code]'`)
        spot_request_fulfilled_count=$(echo ${spot_request_statuses[@]} | tr ' ' '\n' | grep -c fulfilled)

        # if all spot requests failed exit before status_check_limit is reached
        spot_request_errors=(canceled-before-fulfillment capacity-not-available capacity-oversubscribed price-too-low)
        for x in "${spot_request_statuses[@]}" ; do
          for i in "${spot_request_errors[@]}"; do
            if [[ "$i" = "$x" ]]; then
              spot_request_error_count=$(( $spot_request_error_count + 1))
              break
            fi
          done
        done

        if [[ "$spot_request_error_count" = "${#spot_instance_request_id[@]}" ]]; then
          echo
          echo "All Spot requests failed, exiting. Statuses were:"
          for x in "${spot_request_statuses[@]}" ; do
            echo " $x"
          done
          # tidy up so no request can fulfill after we have gone
          aws ec2 cancel-spot-instance-requests --spot-instance-request-ids $(printf " %s" "${spot_instance_request_id[@]}") --region $REGION
          exit
        fi

        echo -n "."
        status_check_count=$(( $status_check_count + 1))
        sleep 5
      done

      # create a filter for the describe-instances command, to get the instances associated with the spot requests
      spot_id_filter_values=""
      for x in "${spot_instance_request_id[@]}" ; do
        spot_id_filter_values+="${x},"
      done
      # append values to filter variable and trim last comma off end of string
      spot_id_filter="Name=spot-instance-request-id,Values=${spot_id_filter_values::${#spot_id_filter_values}-1}"

      echo "Will be using this Spot ID filter to find new instances: $spot_id_filter"

      # Instances might not be found immediately, wait a few seconds if necessary
      status_check_count=0
      status_check_limit=60
      instances_ready=false
      while true; do
        instance_describe=`aws ec2 describe-instances --filters $spot_id_filter --region $REGION`
        if [[ $instance_describe != *"Client.InvalidInstanceID.NotFound"* ]]; then
          instances_ready=true
        fi
        status_check_count=$(( $status_check_count + 1))
        echo "."
        if [ $instances_ready = true ] || [ $status_check_count -gt $status_check_limit ]; then
          break
        fi
      done

      attempted_instanceids=(`aws ec2 describe-instances \
        --filters $spot_id_filter \
        --region $REGION \
        --output text \
        --query 'Reservations[].Instances[].InstanceId'`)
    fi

    # check to see if Amazon returned the desired number of instances as a limit is placed restricting this and we need to handle the case where
    # less than the expected number is given without failing the test.
    countof_instanceids=${#attempted_instanceids[@]}
    if [ "$countof_instanceids" = 0 ] ; then
        echo
        echo "Amazon did not supply any instances, exiting"
        echo
        exit
    fi
    if [ $countof_instanceids != $instance_count ] ; then
        echo "$countof_instanceids instance(s) were given by Amazon, the test will continue using only these instance(s)."
        instance_count=$countof_instanceids
    else
        echo "success"
    fi
    echo

    # wait for each instance to be fully operational (both reachability checks passed)
    status_check_count=0
    status_check_limit=270
    status_check_limit=`echo "$status_check_limit + $countof_instanceids" | bc` # increase wait time based on instance count
    echo "waiting for instance status checks to pass (this can take several minutes)..."
    count_passed=0
    while [ "$count_passed" -ne "$instance_count" ] && [ $status_check_count -lt $status_check_limit ]
    do
        # Update progress bar
        progressBar $countof_instanceids $count_passed
        status_check_count=$(( $status_check_count + 1))
        count_passed=(`aws ec2 describe-instance-status --instance-ids ${attempted_instanceids[@]} \
          --region $REGION \
          --output json \
          --query 'InstanceStatuses[].InstanceStatus.Details[].Status' | grep -c passed`)
        sleep 1
    done
    progressBar $countof_instanceids $count_passed true
    echo

    if [ $status_check_count -lt $status_check_limit ] ; then # all hosts started ok because count_passed==instance_count
      # set the instanceids array to use from now on - attempted = actual
      for key in "${!attempted_instanceids[@]}"
      do
        instanceids["$key"]="${attempted_instanceids["$key"]}"
      done

      # set hosts array
      hosts=(`aws ec2 describe-instances --instance-ids ${attempted_instanceids[@]} \
        --region $REGION \
        --output text \
        --query 'Reservations[].Instances[].PublicIpAddress'`)
    else # Amazon probably failed to start a host [*** NOTE this is fairly common ***] so show a msg - TO DO. Could try to replace it with a new one?
      original_count=$countof_instanceids
      # filter requested instances for only those that started well
      # NB: describe-instance-status returns InstanceStatuses[], not Reservations[],
      # and multiple Name= pairs must go in a single --filters option (a second
      # --filters would override the first)
      healthy_instanceids=(`aws ec2 describe-instance-status --instance-ids ${attempted_instanceids[@]} \
                          --filters Name=instance-status.reachability,Values=passed \
                                    Name=system-status.reachability,Values=passed \
                          --region $REGION \
                          --output text \
                          --query 'InstanceStatuses[].InstanceId'`)

      hosts=(`aws ec2 describe-instances --instance-ids ${healthy_instanceids[@]} \
        --region $REGION \
        --output text \
        --query 'Reservations[].Instances[].PublicIpAddress'`)

      if [ "${#healthy_instanceids[@]}" -eq 0 ] ; then
        countof_instanceids=0
        echo "no instances successfully initialised, exiting"
        if [ "$terminate" = "TRUE" ] ; then
          echo
          echo
          # attempt to terminate any running instances - just to be sure
          echo "terminating instance(s)..."
          # We use attempted_instanceids here to make sure that there are no orphan instances left lying around
          aws ec2 terminate-instances --instance-ids ${attempted_instanceids[@]} \
            --region $REGION \
            --output text \
            --query 'TerminatingInstances[].InstanceId'
          echo
        fi
        exit
      else
        countof_instanceids=${#healthy_instanceids[@]}
      fi

      # if we still see failed instances then write a message
      countof_failedinstances=`echo "$original_count - $countof_instanceids"|bc`
      if [ "$countof_failedinstances" -gt 0 ] ; then
        echo "$countof_failedinstances instances(s) failed to start, only $countof_instanceids machine(s) will be used in the test"
        instance_count=$countof_instanceids
      fi

      # set the array of instance ids based on only those that succeeded
      for key in "${!healthy_instanceids[@]}"  # make sure you include the quotes there
      do
        instanceids["$key"]="${healthy_instanceids["$key"]}"
      done
    fi

    echo

    # assign a name tag to each instance
    echo "assigning tags..."
    (aws ec2 create-tags --resources ${attempted_instanceids[@]} --tags Key=Name,Value="jmeter-ec2-$project" --region $REGION)
    wait
    echo "complete"
    echo

    # if provided, assign elastic IPs to each instance
    if [ ! -z "$ELASTIC_IPS" ] ; then # Not Null - same as -n
      echo "assigning elastic ips..."
      for x in "${!instanceids[@]}" ; do
          (aws ec2 associate-address --instance-id ${instanceids[x]} --public-ip ${elasticips[x]} --region $REGION )
          hosts[x]=${elasticips[x]}
      done
      wait
      echo "complete"
      echo
      echo -n "checking elastic ips..."
      for x in "${!instanceids[@]}" ; do
        # wait for ssh connectivity on the new address, bounded so a dead host
        # cannot hang the script forever
        ssh_check_count=0
        until ssh -o StrictHostKeyChecking=no -q -i $PEM_PATH/$PEM_FILE \
            $USER@${hosts[x]} -p $REMOTE_PORT true ; do
          echo -n .
          sleep 1
          ssh_check_count=$(( ssh_check_count + 1))
          if [ $ssh_check_count -gt 60 ] ; then
            echo "Host ${hosts[x]} is not responding on its elastic IP, script exiting..."
            exit
          fi
        done
        # Note. If any IP is already in use on an instance that is still running then the ssh check above will return
        # a false positive. If this scenario is common you should put a sleep statement here.
      done
      wait
      echo "complete"
      echo
    fi
  else # the property REMOTE_HOSTS is set so we will use this list of predefined hosts instead
    hosts=(`echo $REMOTE_HOSTS | tr "," "\n" | tr -d ' '`)
    instance_count=${#hosts[@]}
    echo
    echo "   -------------------------------------------------------------------------------------"
    echo "       jmeter-ec2 Automation Script - Running $project.jmx over $instance_count predefined host(s)"
    echo "   -------------------------------------------------------------------------------------"
    echo
    echo

    # Check if remote hosts are up
    for host in ${hosts[@]} ; do
      if [ ! "$(ssh -q \
        -o StrictHostKeyChecking=no \
        -o "BatchMode=yes" \
        -o "ConnectTimeout=15" \
        -i "$PEM_PATH/$PEM_FILE" \
        -p $REMOTE_PORT \
        $USER@$host echo up)" == "up" ] ; then
        echo "Host $host is not responding, script exiting..."
        echo
        exit
      fi
    done
  fi

  # scp verify.sh
  if [ "$setup" = "TRUE" ] ; then
    echo "copying verify.sh to $instance_count server(s)..."

    for host in ${hosts[@]} ; do
      # each copy runs in the background; a marker file signals completion
      (scp -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
                    -i "$PEM_PATH/$PEM_FILE" \
                    -P $REMOTE_PORT \
                    $LOCAL_HOME/verify.sh \
                    $LOCAL_HOME/jmeter-ec2.properties \
                    $USER@$host:$REMOTE_HOME \
                    && echo "done" > $project_home/$DATETIME-$host-scpverify.out) &
    done

    # check to see if the scp call is complete (could just use the wait command here...)
    res=0
    while [ "$res" != "$instance_count" ] ;
    do
        # Update progress bar
        progressBar $instance_count $res
        # Count how many out files we have for the copy (if the file exists the copy completed)
        # Note. We send stderr to dev/null in the ls cmd below to prevent file not found errors filling the screen
        # and the sed command here trims whitespace
        res=$(ls -l $project_home/$DATETIME*scpverify.out 2>/dev/null | wc -l | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')
        sleep 1
    done
    progressBar $instance_count $res true
    echo
    echo

    # Install test software
    echo "running verify.sh on $instance_count server(s)..."
    for host in ${hosts[@]} ; do
      (ssh -nq -o StrictHostKeyChecking=no \
            -i "$PEM_PATH/$PEM_FILE" $USER@$host -p $REMOTE_PORT \
            "$REMOTE_HOME/verify.sh $JMETER_VERSION 2>&1"\
            > $project_home/$DATETIME-$host-verify.out) &
    done

    # check to see if the verify script is complete
    res=0
    while [ "$res" != "$instance_count" ] ; do # Installation not complete (count of matches for 'software installed' not equal to count of hosts running the test)
      # Update progress bar
      progressBar $instance_count $res
      res=$(grep -c "software installed" $project_home/$DATETIME*verify.out \
          | awk -F: '{ s+=$NF } END { print s }') # the awk command here sums up the output if multiple matches were found
      sleep 1
    done
    progressBar $instance_count $res true
    echo
    echo
  fi

  # Create a working jmx file and edit it to adjust thread counts and filepaths (leave the original jmx intact!)
  cp $project_home/jmx/$project.jmx $project_home/working
  working_jmx="$project_home/working"
  temp_jmx="$project_home/temp"

  # first filepaths (this will help with things like csv files)
  # edit any 'stringProp filename=' references to use $REMOTE_DIR in place of whatever local path was being used
  # we assume that the required dat file is copied into the local /data directory
  filepaths=$(awk 'BEGIN { FS = ">" } ; /<stringProp name=\"filename\">[^<]*<\/stringProp>/ {print $2}' $working_jmx | cut -d'<' -f1) # pull out filepath
  i=1
  while read filepath ; do
    if [ -n "$filepath" ] ; then # this entry is not blank
      # extract the filename from the filepath using '/' separator
      filename=$( echo $filepath | awk -F"/" '{print $NF}' )
      endresult="$REMOTE_HOME"/data/"$filename"
      if [[ $filepath =~ .*\$.* ]] ; then
        echo "The path $filepath contains a $ char, this currently fails the awk sub command."
        echo "You'll have to remove these from all filepaths. Sorry."
        echo
        echo "Script exiting"
        exit
      fi
      # rewrite the i-th filename stringProp to point at the remote data dir
      awk '/<stringProp name=\"filename\">[^<]*<\/stringProp>/{c++;if(c=='"$i"') \
                             {sub("filename\">'"$filepath"'<","filename\">'"$endresult"'<")}}1'  \
                             $working_jmx > $temp_jmx
      rm $working_jmx
      mv $temp_jmx $working_jmx
    fi
    # increment i
    i=$((i+1))
  done <<<"$filepaths"

  # now we use the same working file to edit thread counts
  # to cope with the problem of trying to spread 10 threads over 3 hosts (10/3 has a remainder) the script creates a unique jmx for each host
  # and then passes out threads to them on a round robin basis
  # as part of this we begin here by creating a working jmx file for each separate host using _$y to isolate
  for y in "${!hosts[@]}" ; do
    # for each host create a working copy of the jmx file
    cp "$working_jmx" "$working_jmx"_"$y"
  done
  # loop through each threadgroup and then use a nested loop within that to edit the file for each host
  # pull out the current values for each thread group
  threadgroup_threadcounts=(`awk 'BEGIN { FS = ">" } ; /ThreadGroup\.num_threads\">[^<]*</ {print $2}' $working_jmx | cut -d'<' -f1`) # put the current thread counts into variable
  threadgroup_names=(`awk 'BEGIN { FS = "\"" } ; /ThreadGroup\" testname=\"[^\"]*\"/ {print $6}' $working_jmx`) # capture each thread group name

  # first we check to make sure each threadgroup_threadcounts is numeric
  for n in ${!threadgroup_threadcounts[@]} ; do
    case ${threadgroup_threadcounts[$n]} in
       ''|*[!0-9]*)
           echo "Error: Thread Group: ${threadgroup_names[$n]} has the value: ${threadgroup_threadcounts[$n]}, which is not numeric - Thread Count must be numeric!"
           echo
           echo "Script exiting..."
           echo
           exit;;
           *);;
    esac
  done

  # get count of thread groups, show results to screen
  countofthreadgroups=${#threadgroup_threadcounts[@]}
  echo "editing thread counts..."
  echo
  echo " - $project.jmx has $countofthreadgroups threadgroup(s) - [inc. those disabled]"

  # sum up the thread counts
  sumofthreadgroups=0
  for n in ${!threadgroup_threadcounts[@]} ; do
    # populate an array of the original thread counts (used in the find and replace when editing the jmx)
    orig_threadcounts[$n]=${threadgroup_threadcounts[$n]}
    # create a total of the original thread counts
    sumofthreadgroups=$(echo "$sumofthreadgroups+${threadgroup_threadcounts[$n]}" | bc)
  done

  # adjust each thread count based on percent
  for n in "${!orig_threadcounts[@]}" ; do
    # get a new thread count to 2 decimal places
    float=$(echo "scale=2; ${orig_threadcounts[$n]}*($percent/100)" | bc)
    # round to integer
    new_threadcounts[$n]=$(echo "($float+0.5)/1" | bc)
    if [ "${new_threadcounts[$n]}" -eq "0" ] ; then
      echo " - Thread group ${threadgroup_names[$n]} has ${orig_threadcounts[$n]} threads, $percent percent of this is $float which rounds to 0, so we're going to set it to 1 instead."
      new_threadcounts[$n]=1
      # no running-total update here; the summing loop below counts every
      # group exactly once (the old extra add double-counted these groups)
    fi
  done

  # Now we sum up the thread counts and print a total
  sumofadjthreadgroups=0
  for n in ${!new_threadcounts[@]} ; do
    sumofadjthreadgroups=$(echo "$sumofadjthreadgroups+${new_threadcounts[$n]}" | bc)
  done

  echo " - There are $sumofthreadgroups threads in the test plan, this test is set to execute $percent percent of these, so will run using $sumofadjthreadgroups threads"

  # now we loop through each thread group, editing a separate file for each host each iteration (nested loop)
  for i in ${!threadgroup_threadcounts[@]} ; do
    # using modulo we distribute the threads over all hosts, building the array 'threads'
    # taking 10(threads)/3(hosts) as an example you would expect two hosts to be given 3 threads and one to be given 4.
    for (( x=1; x<=${new_threadcounts[$i]}; x++ )); do
      : $(( threads[$(( $x % ${#hosts[@]} ))]++ ))
    done

    # here we loop through every host, editing the jmx file and using a temp file to carry the changes over
    for y in "${!hosts[@]}" ; do
      # we're already in a loop for each thread group but awk will parse the entire file each time it is called so we need to
      # use an index to know when to make the edit
      # when c (awk's index) matches i (the main for loop's index) then a substitution is made

      # first check for any null values (caused by lots of hosts and not many threads)
      threadgroupschanged=0
      if [ -z "${threads[$y]}" ] ; then
        threads[$y]=1
        threadgroupschanged=$(echo "$threadgroupschanged+1" | bc)
      fi
      if [ "$threadgroupschanged" == "1" ] ; then
        echo " - $threadgroupschanged thread groups were allocated zero threads, this happens because the total allocated threads to a group is less than the $instance_count instances being used."
        echo "   To get around this the script gave each group an extra thread, a better solution is to revise the test configuration to use more threads / less instances"
      fi
      findstr="threads\">"${orig_threadcounts[$i]}
      replacestr="threads\">"${threads[$y]}
      awk -v "findthis=$findstr" -v "replacewiththis=$replacestr" \
        'BEGIN{c=0} \
        /ThreadGroup\.num_threads\">[^<]*</ \
        {if(c=='"$i"'){sub(findthis,replacewiththis)};c++}1' \
        "$working_jmx"_"$y" > "$temp_jmx"_"$y"

      # using awk requires the use of a temp file to save the results of the command, update the working file with this file
      rm "$working_jmx"_"$y"
      mv "$temp_jmx"_"$y" "$working_jmx"_"$y"
    done

    unset threads
  done
  echo
  echo "thread counts updated"
  echo

  # scp the test files onto each host
  echo -n "copying test files to $instance_count server(s)..."

  # scp jmx dir
  echo -n "jmx files.."
  for y in "${!hosts[@]}" ; do
      (scp -q -C -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r \
                                    -i "$PEM_PATH/$PEM_FILE" -P $REMOTE_PORT \
                                    $project_home/working_$y \
                                    $USER@${hosts[$y]}:$REMOTE_HOME/execute.jmx) &
  done
  wait
  echo -n "done...."

  # scp data dir
  if [ "$setup" = "TRUE" ] ; then
    if [ -r $project_home/data ] ; then # don't try to upload this optional dir if it is not present
      echo -n "data dir.."
      for host in ${hosts[@]} ; do
          (scp -q -C -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r \
                                        -i "$PEM_PATH/$PEM_FILE" -P $REMOTE_PORT \
                                        $project_home/data \
                                        $USER@$host:$REMOTE_HOME/) &
      done
      wait
      echo -n "done...."
    fi

    # scp jmeter.properties
    if [ -r $LOCAL_HOME/jmeter.properties ] ; then # don't try to upload this optional file if it is not present
      echo -n "jmeter.properties.."
      for host in ${hosts[@]} ; do
          (scp -q -C -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
                                        -i "$PEM_PATH/$PEM_FILE" -P $REMOTE_PORT \
                                        $LOCAL_HOME/jmeter.properties \
                                        $USER@$host:$REMOTE_HOME/$JMETER_VERSION/bin/) &
      done
      wait
      echo -n "done...."
    fi

    # scp system.properties
    if [ -r $LOCAL_HOME/system.properties ] ; then # don't try to upload this optional file if it is not present
      echo -n "system.properties.."
      for host in ${hosts[@]} ; do
          (scp -q -C -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
                                        -i "$PEM_PATH/$PEM_FILE" -P $REMOTE_PORT \
                                        $LOCAL_HOME/system.properties \
                                        $USER@$host:$REMOTE_HOME/$JMETER_VERSION/bin/) &
      done
      wait
      echo -n "done...."
    fi

    # scp keystore
    if [ -r $LOCAL_HOME/keystore.jks ] ; then # don't try to upload this optional file if it is not present
      echo -n "keystore.jks.."
      for host in ${hosts[@]} ; do
          (scp -q -C -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
                                        -i "$PEM_PATH/$PEM_FILE" -P $REMOTE_PORT \
                                        $LOCAL_HOME/keystore.jks \
                                        $USER@$host:$REMOTE_HOME) &
      done
      wait
      echo -n "done...."
    fi

    # scp jmeter execution file
    if [ -r $LOCAL_HOME/jmeter ] ; then # don't try to upload this optional file if it is not present
      echo -n "jmeter execution file..."
      for host in ${hosts[@]} ; do
          (scp -q -C -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
                                        -i "$PEM_PATH/$PEM_FILE" -P $REMOTE_PORT \
                                        $LOCAL_HOME/jmeter \
                                        $USER@$host:$REMOTE_HOME/$JMETER_VERSION/bin/) &
      done
      wait
      echo -n "done...."
    fi

    # scp any custom jar files
    if [ -r $LOCAL_HOME/plugins ] ; then # don't try to upload this optional dir if it is not present
      echo -n "custom jar file(s)..."
      for host in ${hosts[@]} ; do
          (scp -q -C -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
                                        -i "$PEM_PATH/$PEM_FILE" -P $REMOTE_PORT \
                                        $LOCAL_HOME/plugins/*.jar \
                                        $USER@$host:$REMOTE_HOME/$JMETER_VERSION/lib/ext/) &
      done
      wait
      echo -n "done...."
    fi

    # scp any project specific custom jar files
    if [ -r $project_home/plugins ] ; then # don't try to upload this optional dir if it is not present
      echo -n "project specific jar file(s)..."
      for host in ${hosts[@]} ; do
          (scp -q -C -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
                                        -i "$PEM_PATH/$PEM_FILE" -P $REMOTE_PORT \
                                        $project_home/plugins/*.jar \
                                        $USER@$host:$REMOTE_HOME/$JMETER_VERSION/lib/ext/) &
      done
      wait
      echo -n "done...."
    fi

    echo "all files uploaded"
    echo
  fi

  # Start JMeter in non-GUI mode on every host, logging to per-host files
  echo "starting jmeter on:"
  for host in ${hosts[@]} ; do
    echo $host
  done
  for counter in ${!hosts[@]} ; do
      ( ssh -nq -o StrictHostKeyChecking=no \
      -p $REMOTE_PORT \
      -i "$PEM_PATH/$PEM_FILE" $USER@${hosts[$counter]} \
      $REMOTE_HOME/$JMETER_VERSION/bin/jmeter.sh -n \
      -t $REMOTE_HOME/execute.jmx \
      -l $REMOTE_HOME/$project-$DATETIME-$counter.jtl \
      >> $project_home/$DATETIME-${hosts[$counter]}-jmeter.out ) &
  done
  echo
  echo
}
$project_home/$DATETIME-$host-jmeter.out | grep \"Results =\" | tail -1 | awk '{print $1}') # make sure the test has really started to write results to the file\n      if [[ -n \"$check\" ]] ; then # not null\n        if [ $check == \"Generate\" ] ; then # test has begun\n          screenupdate=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results +\" | tail -1)\n          echo \"> $(date +%T): $screenupdate | host: $host\" # write results to screen\n\n          # get the latest values\n          count=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results +\" | tail -1 | awk '{print $5}') # pull out the current count\n          avg=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results +\" | tail -1 | awk '{print $11}') # pull out current avg\n          tps_raw=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results +\" | tail -1 | awk '{print $9}') # pull out current tps\n          errors_raw=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results +\" | tail -1 | awk '{print $17}') # pull out current errors\n          tps=${tps_raw%/s} # remove the trailing '/s'\n\n          # get the latest summary values\n          count_total=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results =\" | tail -1 | awk '{print $5}')\n          avg_total=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results =\" | tail -1 | awk '{print $11}')\n          tps_total_raw=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results =\" | tail -1 | awk '{print $9}')\n          tps_recent_raw=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results +\" | tail -1 | awk '{print $9}')\n          tps_total=${tps_total_raw%/s} # remove the trailing '/s'\n          tps_recent=${tps_recent_raw%/s} # remove the trailing '/s'\n          errors_total=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results =\" | tail -1 | awk '{print $17}')\n\n          
count_overallhosts=$(echo \"$count_overallhosts+$count_total\" | bc) # add the value from this host to the values from other hosts\n          avg_overallhosts=$(echo \"$avg_overallhosts+$avg\" | bc)\n          tps_overallhosts=$(echo \"$tps_overallhosts+$tps_total\" | bc)\n          tps_recent_overallhosts=$(echo \"$tps_recent_overallhosts+$tps_recent\" | bc)\n          errors_overallhosts=$(echo \"$errors_overallhosts+$errors_total\" | bc) # add the value from this host to the values from other hosts\n        fi\n      fi\n    done #<<<\"${hosts_str}\" # next host\n\n    # calculate the average respone time over all hosts\n    avg_overallhosts=$(echo \"$avg_overallhosts/$instance_count\" | bc)\n\n    # every RUNNINGTOTAL_INTERVAL loops print a running summary (if each host is running)\n    mod=$(echo \"$i % $RUNNINGTOTAL_INTERVAL\"|bc)\n    if [ $mod == 0 ] ; then\n      if [ $firstmodmatch == \"TRUE\" ] ; then # don't write summary results the first time (because it's not useful)\n        firstmodmatch=\"FALSE\"\n      else\n        # first check the results files to make sure data is available\n        wait=0\n        for host in ${hosts[@]} ; do\n          result_count=$(grep -c \"Results =\" $project_home/$DATETIME-$host-jmeter.out)\n          if [ $result_count = 0 ] ; then\n            wait=1\n          fi\n        done\n\n        # now write out the data to the screen\n        if [ $wait == 0 ] ; then # each file is ready to summarise\n          for host in ${hosts[@]} ; do\n            screenupdate=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results =\" | tail -1)\n            echo \"> $(date +%T): $screenupdate | host: $host\" # write results to screen\n          done\n          echo \">\"\n          echo \"> $(date +%T): [RUNNING TOTALS] total count: $count_overallhosts, current avg: $avg_overallhosts (ms), average tps: $tps_overallhosts (p/sec), recent tps: $tps_recent_overallhosts (p/sec), total errors: $errors_overallhosts\"\n         
 echo \">\"\n        fi\n      fi\n    fi\n    i=$(( $i + 1))\n\n    sleep $sleep_interval\n\n    # we rely on JM to keep track of overall test totals (via Results =) so we only need keep count of values over multiple instances\n    # there's no need for a running total outside of this loop so we reinitialise the vars here.\n    count_total=0\n    avg_total=0\n    count_overallhosts=0\n    avg_overallhosts=0\n    tps_overallhosts=0\n    tps_recent_overallhosts=0\n    errors_overallhosts=0\n\n    # check to see if the test is complete\n    res=$(grep -c \"end of run\" $project_home/$DATETIME*jmeter.out | awk -F: '{ s+=$NF } END { print s }')\n  done # test complete\n\n  # now the test is complete calculate a final summary and write to the screen\n  for host in ${hosts[@]} ; do\n    # get the final summary values\n    count_total=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results =\" | tail -1 | awk '{print $5}')\n    avg_total=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results =\" | tail -1 | awk '{print $11}')\n    tps_total_raw=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results =\" | tail -1 | awk '{print $9}')\n    tps_total=${tps_total_raw%/s} # remove the trailing '/s'\n    tps_recent_raw=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results +\" | tail -1 | awk '{print $9}')\n    tps_recent=${tps_recent_raw%/s} # remove the trailing '/s'\n    errors_total=$(tail -10 $project_home/$DATETIME-$host-jmeter.out | grep \"Results =\" | tail -1 | awk '{print $17}')\n\n    # running totals\n    count_overallhosts=$(echo \"$count_overallhosts+$count_total\" | bc) # add the value from this host to the values from other hosts\n    avg_overallhosts=$(echo \"$avg_overallhosts+$avg_total\" | bc)\n    tps_overallhosts=$(echo \"$tps_overallhosts+$tps_total\" | bc) # add the value from this host to the values from other hosts\n    tps_recent_overallhosts=$(echo \"$tps_recent_overallhosts+$tps_recent\" | bc)\n   
 errors_overallhosts=$(echo \"$errors_overallhosts+$errors_total\" | bc) # add the value from this host to the values from other hosts\n  done\n\n  # calculate averages over all hosts\n  avg_overallhosts=$(echo \"$avg_overallhosts/$instance_count\" | bc)\n}\n\nfunction runcleanup() {\n\t# Turn off the CTRL-C trap now that we are already in the runcleanup function\n\ttrap - INT\n\n  if [ \"$teststarted\" -eq 1 ] ; then\n    # display final results\n    echo \">\"\n    echo \">\"\n    echo \"> $(date +%T): [FINAL RESULTS] total count: $count_overallhosts, overall avg: $avg_overallhosts (ms), overall tps: $tps_overallhosts (p/sec), recent tps: $tps_recent_overallhosts (p/sec), errors: $errors_overallhosts\"\n    echo \">\"\n    echo \"===================================================================== END OF JMETER-EC2 TEST ==================================================================================\"\n    echo\n    echo\n\n    # download the results\n    for i in ${!hosts[@]} ; do\n      echo -n \"downloading results from ${hosts[$i]}...\"\n      scp -q -C -o UserKnownHostsFile=/dev/null \\\n                                   -o StrictHostKeyChecking=no \\\n                                   -i \"$PEM_PATH/$PEM_FILE\" \\\n                                   -P $REMOTE_PORT \\\n                                   $USER@${hosts[$i]}:$REMOTE_HOME/$project-*.jtl \\\n                                   $project_home/\n      # Append the hostname\n      sed \"s/$/,\"${hosts[$i]}\"/\" $project_home/$project-$DATETIME-$i.jtl >> $project_home/$project-$DATETIME-$i-appended.jtl\n      rm $project_home/$project-$DATETIME-$i.jtl\n      echo \"$project_home/$project-$DATETIME-$i.jtl complete\"\n    done\n    echo\n\n    # process the files into one jtl results file\n    echo -n \"processing results...\"\n    for (( i=0; i<$instance_count; i++ )) ; do\n      cat $project_home/$project-$DATETIME-$i-appended.jtl >> $project_home/$project-$DATETIME-grouped.jtl\n      rm 
$project_home/$project-$DATETIME-$i-appended.jtl # removes the individual results files (from each host) - might be useful to some people to keep these files?\n    done\n\n    # Sort File\n    sort $project_home/$project-$DATETIME-grouped.jtl >> $project_home/$project-$DATETIME-sorted.jtl\n\n    # Remove blank lines\n    sed '/^$/d' $project_home/$project-$DATETIME-sorted.jtl >> $project_home/$project-$DATETIME-noblanks.jtl\n\n    # Remove any lines containing \"0,0,Error:\" - which seems to be an intermittant bug in JM where the getTimestamp call fails with a nullpointer\n    sed '/^0,0,Error:/d' $project_home/$project-$DATETIME-noblanks.jtl >> $project_home/$project-$DATETIME-complete.jtl\n\n    # Calclulate test duration\n    start_time=$(head -1 $project_home/$project-$DATETIME-complete.jtl | cut -d',' -f1)\n    end_time=$(tail -1 $project_home/$project-$DATETIME-complete.jtl | cut -d',' -f1)\n    duration=$(echo \"$end_time-$start_time\" | bc)\n    if ! [ \"$duration\" -gt 0 ] ; then\n      duration=0;\n    fi\n  fi\n\n  # terminate any running instances created\n  if [ -z \"$REMOTE_HOSTS\" ]; then\n  \tif [ \"$terminate\" = \"TRUE\" ] ; then\n      echo\n      echo\n      echo \"terminating instance(s)...\"\n      # We use attempted_instanceids here to make sure that there are no orphan instances left lying around\n\t\t\taws ec2 terminate-instances --instance-ids ${attempted_instanceids[@]} \\\n\t\t\t\t--region $REGION \\\n\t\t\t\t--output text \\\n\t\t\t\t--query 'TerminatingInstances[].InstanceId'\n      echo\n  \tfi\n  fi\n\n\t# Tidy up\n  if [ -e \"$project_home/$project-$DATETIME-grouped.jtl\" ] ; then rm $project_home/$project-$DATETIME-grouped.jtl ; fi\n  if [ -e \"$project_home/$project-$DATETIME-sorted.jtl\" ] ; then rm $project_home/$project-$DATETIME-sorted.jtl ; fi\n  if [ -e \"$project_home/$project-$DATETIME-noblanks.jtl\" ] ; then rm $project_home/$project-$DATETIME-noblanks.jtl ; fi\n  if [ -e \"$project_home/$project-$DATETIME-complete.jtl\" 
] ; then\n    mkdir -p $project_home/results/\n    mv $project_home/$project-$DATETIME-complete.jtl $project_home/results/\n  fi\n\n  # tidy up working files\n  # for debugging purposes you could comment out these lines\n  rm $project_home/$DATETIME*.out\n  rm $project_home/working*\n\n\n  echo\n  echo \"   -------------------------------------------------------------------------------------\"\n  echo \"                  jmeter-ec2 Automation Script - COMPLETE\"\n  echo\n  if [ \"$teststarted\" -eq 1 ] ; then\n    echo \"   Test Results: $project_home/results/$project-$DATETIME-complete.jtl\"\n  fi\n  echo \"   -------------------------------------------------------------------------------------\"\n  echo\n}\n\n\nprogressBarWidth=50\nspinnerIndex=1\nsp=\"/-\\|\"\n\n# Function to draw progress bar\nprogressBar() {\n  taskCount=$1\n  tasksDone=$2\n  progressDone=$3\n  # Calculate number of fill/empty slots in the bar\n  progress=$(echo \"$progressBarWidth/$taskCount*$tasksDone\" | bc -l)\n  fill=$(printf \"%.0f\\n\" $progress)\n  if [ $fill -gt $progressBarWidth ]; then\n    fill=$progressBarWidth\n  fi\n  empty=$(($fill-$progressBarWidth))\n\n  # Percentage Calculation\n  progressPercent=$(echo \"100/$taskCount*$tasksDone\" | bc -l)\n  progressPercent=$(printf \"%0.2f\\n\" $progressPercent)\n  if [[ -n \"${progressPercent}\" && $(echo \"$progressPercent>100\" | bc) -gt 0 ]]; then\n    progressPercent=\"100.00\"\n  fi\n\n  # Output to screen\n  printf \"\\r[\"\n  printf \"%${fill}s\" '' | tr ' ' \\#\n  printf \"%${empty}s\" '' | tr ' ' \" \"\n  printf \"] $progressPercent%% - ($tasksDone of $taskCount) \"\n  if [ $progressDone ] ; then\n    printf \" - Done.\"\n  else\n    printf \" \\b${sp:spinnerIndex++%${#sp}:1} \"\n  fi\n}\n\nfunction control_c(){\n\t# Turn off the CTRL-C trap now that it has been invoked once already\n\ttrap - INT\n\n  if [ \"$teststarted\" -eq 1 ] ; then\n    # Stop the running test on each host\n    echo\n    echo \"> Stopping test...\"\n    
for f in ${!hosts[@]} ; do\n        ( ssh -nq -o StrictHostKeyChecking=no \\\n        -i \"$PEM_PATH/$PEM_FILE\" $USER@${hosts[$f]} -p $REMOTE_PORT \\\n        $REMOTE_HOME/$JMETER_VERSION/bin/stoptest.sh ) &\n    done\n    wait\n    echo \">\"\n  fi\n\n  runcleanup\n  exit\n}\n\n# trap keyboard interrupt (control-c)\ntrap control_c SIGINT\n\ncheck_prereqs\nrunsetup\nruntest\nruncleanup\n"
  },
  {
    "path": "jmeter.properties",
    "content": "################################################################################\n# Apache JMeter Property file\n################################################################################\n\n##   Licensed to the Apache Software Foundation (ASF) under one or more\n##   contributor license agreements.  See the NOTICE file distributed with\n##   this work for additional information regarding copyright ownership.\n##   The ASF licenses this file to You under the Apache License, Version 2.0\n##   (the \"License\"); you may not use this file except in compliance with\n##   the License.  You may obtain a copy of the License at\n## \n##       http://www.apache.org/licenses/LICENSE-2.0\n## \n##   Unless required by applicable law or agreed to in writing, software\n##   distributed under the License is distributed on an \"AS IS\" BASIS,\n##   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n##   See the License for the specific language governing permissions and\n##   limitations under the License.\n\n################################################################################\n#\n#                      THIS FILE SHOULD NOT BE MODIFIED\n#\n# This avoids having to re-apply the modifications when upgrading JMeter\n# Instead only user.properties should be modified:\n# 1/ copy the property you want to modify to user.properties from jmeter.properties\n# 2/ Change its value there\n#\n################################################################################\n\n#Preferred GUI language. 
Comment out to use the JVM default locale's language.\n#language=en\n\n# Additional locale(s) to add to the displayed list.\n# The current default list is: en, fr, de, no, es, tr, ja, zh_CN, zh_TW, pl, pt_BR\n# [see JMeterMenuBar#makeLanguageMenu()]\n# The entries are a comma-separated list of language names\n#locales.add=zu\n\n# Netscape HTTP Cookie file\ncookies=cookies\n\n#---------------------------------------------------------------------------\n# File format configuration for JMX and JTL files\n#---------------------------------------------------------------------------\n\n# Properties:\n# file_format          - affects both JMX and JTL files\n# file_format.testplan - affects JMX files only\n# file_format.testlog  - affects JTL files only\n#\n# Possible values are:\n# 2.1 - initial format using XStream\n# 2.2 - updated format using XStream, with shorter names\n\n# N.B. format 2.0 (Avalon) is no longer supported\n\n#---------------------------------------------------------------------------\n# XML Parser\n#---------------------------------------------------------------------------\n\n# XML Reader(Parser) - Must implement SAX 2 specs\nxml.parser=org.apache.xerces.parsers.SAXParser\n\n# Path to a Properties file containing Namespace mapping in the form\n# prefix=Namespace\n# Example:\n# ns=http://biz.aol.com/schema/2006-12-18\n#xpath.namespace.config=\n\n#---------------------------------------------------------------------------\n# SSL configuration\n#---------------------------------------------------------------------------\n\n## SSL System properties are now in system.properties\n\n# JMeter no longer converts javax.xxx property entries in this file into System properties.\n# These must now be defined in the system.properties file or on the command-line.\n# The system.properties file gives more flexibility.\n\n# By default, SSL session contexts are now created per-thread, rather than being shared.\n# The original behaviour can be enabled by setting the 
JMeter property:\n#https.sessioncontext.shared=true\n\n# Default HTTPS protocol level:\n#https.default.protocol=TLS\n# This may need to be changed here (or in user.properties) to:\n#https.default.protocol=SSLv3\n\n# List of protocols to enable. You may have to select only a subset if you find issues with target server.\n# This is needed when server does not support Socket version negotiation, this can lead to:\n# javax.net.ssl.SSLPeerUnverifiedException: peer not authenticated\n# java.net.SocketException: Connection reset\n# see https://issues.apache.org/bugzilla/show_bug.cgi?id=54759\n#https.socket.protocols=SSLv2Hello SSLv3 TLSv1\n\n# Control if we allow reuse of cached SSL context between iterations\n# set the value to 'false' to reset the SSL context each iteration\n#https.use.cached.ssl.context=true\n\n# Start and end index to be used with keystores with many entries\n# The default is to use entry 0, i.e. the first\n#https.keyStoreStartIndex=0\n#https.keyStoreEndIndex=0\n\n#---------------------------------------------------------------------------\n# Look and Feel configuration\n#---------------------------------------------------------------------------\n\n#Classname of the Swing default UI\n#\n# The LAF classnames that are available are now displayed as ToolTip text\n# when hovering over the Options/Look and Feel selection list.\n#\n# You can either use a full class name, as shown above,\n# or one of the strings \"System\" or \"CrossPlatform\" which means\n#  JMeter will use the corresponding string returned by UIManager.get<name>LookAndFeelClassName()\n\n# LAF can be overridden by os.name (lowercased, spaces replaced by '_')\n# Sample os.name LAF:\n#jmeter.laf.windows_xp=javax.swing.plaf.metal.MetalLookAndFeel\n\n# Failing that, the OS family = os.name, but only up to first space:\n# Sample OS family LAF:\n#jmeter.laf.windows=com.sun.java.swing.plaf.windows.WindowsLookAndFeel\n\n# Mac apparently looks better with the System LAF\njmeter.laf.mac=System\n\n# 
Failing that, the JMeter default laf can be defined:\n#jmeter.laf=System\n\n# If none of the above jmeter.laf properties are defined, JMeter uses the CrossPlatform LAF.\n# This is because the CrossPlatform LAF generally looks better than the System LAF.\n# See https://issues.apache.org/bugzilla/show_bug.cgi?id=52026 for details\n# N.B. the laf can be defined in user.properties.\n\n# LoggerPanel display\n# default to false\n#jmeter.loggerpanel.display=false\n\n# Enable LogViewer Panel to receive log event even if closed\n# Enabled since 2.12\n# Note this has some impact on performances, but as GUI mode must\n# not be used for Load Test it is acceptable\n#jmeter.loggerpanel.enable_when_closed=true\n\n# Error/Fatal Log count display\n# defaults to true\n#jmeter.errorscounter.display=true\n\n# Max characters kept in LoggerPanel, default to 80000 chars\n# O means no limit\n#jmeter.loggerpanel.maxlength=80000\n\n# Toolbar display\n# default:\n#jmeter.toolbar.display=true\n# Toolbar icon definitions\n#jmeter.toolbar.icons=org/apache/jmeter/images/toolbar/icons-toolbar.properties\n# Toolbar list\n#jmeter.toolbar=new,open,close,save,save_as_testplan,|,cut,copy,paste,|,expand,collapse,toggle,|,test_start,test_stop,test_shutdown,|,test_start_remote_all,test_stop_remote_all,test_shutdown_remote_all,|,test_clear,test_clear_all,|,search,search_reset,|,function_helper,help\n# Toolbar icons default size: 22x22. 
Available sizes are: 22x22, 32x32, 48x48\n#jmeter.toolbar.icons.size=22x22\n\n# Icon definitions\n# default:\n#jmeter.icons=org/apache/jmeter/images/icon.properties\n# alternate:\n#jmeter.icons=org/apache/jmeter/images/icon_1.properties\n\n#Components to not display in JMeter GUI (GUI class name or static label)\n# These elements are deprecated: HTML Parameter Mask,HTTP User Parameter Modifier, Webservice (SOAP) Request\nnot_in_menu=org.apache.jmeter.protocol.http.modifier.gui.ParamModifierGui, HTTP User Parameter Modifier, org.apache.jmeter.protocol.http.control.gui.WebServiceSamplerGui\n\n# Number of items in undo history\n# Feature is disabled by default (0)\n# Set it to a number > 0 (25 can be a good default)\n# The bigger it is, the more it consumes memory\n#undo.history.size=0\n\n#---------------------------------------------------------------------------\n# Remote hosts and RMI configuration\n#---------------------------------------------------------------------------\n\n# Remote Hosts - comma delimited\nremote_hosts=127.0.0.1\n#remote_hosts=localhost:1099,localhost:2010\n\n# RMI port to be used by the server (must start rmiregistry with same port)\n#server_port=1099\n\n# To change the port to (say) 1234:\n# On the server(s)\n# - set server_port=1234\n# - start rmiregistry with port 1234\n# On Windows this can be done by:\n# SET SERVER_PORT=1234\n# JMETER-SERVER\n#\n# On Unix:\n# SERVER_PORT=1234 jmeter-server\n#\n# On the client:\n# - set remote_hosts=server:1234\n\n# Parameter that controls the RMI port used by the RemoteSampleListenerImpl (The Controler)\n# Default value is 0 which means port is randomly assigned\n# You may need to open Firewall port on the Controller machine\n#client.rmi.localport=0\n\n# When distributed test is starting, there may be several attempts to initialize\n# remote engines. By default, only single try is made. 
Increase following property\n# to make it retry for additional times\n#client.tries=1\n\n# If there is initialization retries, following property sets delay between attempts\n#client.retries_delay=5000\n\n# When all initialization tries was made, test will fail if some remote engines are failed\n# Set following property to true to ignore failed nodes and proceed with test \n#client.continue_on_fail=false\n\n# To change the default port (1099) used to access the server:\n#server.rmi.port=1234\n\n# To use a specific port for the JMeter server engine, define\n# the following property before starting the server:\n#server.rmi.localport=4000\n\n# From JMeter 2.3.1, the jmeter server creates the RMI registry as part of the server process.\n# To stop the server creating the RMI registry:\n#server.rmi.create=false\n\n# From JMeter 2.3.1, define the following property to cause JMeter to exit after the first test\n#server.exitaftertest=true\n\n# Prefix used by IncludeController when building file name\n#includecontroller.prefix=\n\n#---------------------------------------------------------------------------\n#         Logging Configuration\n#---------------------------------------------------------------------------\n\n# Note: JMeter uses Avalon (Excalibur) LogKit\n\n# Logging Format\n# see http://excalibur.apache.org/apidocs/org/apache/log/format/PatternFormatter.html\n\n#\n# Default format:\n#log_format=%{time:yyyy/MM/dd HH:mm:ss} %5.5{priority} - %{category}: %{message} %{throwable}\n# \\n is automatically added to the end of the string\n#\n# Predefined formats in the JMeter LoggingManager:\n#log_format_type=default\n#log_format_type=thread_prefix\n#log_format_type=thread_suffix\n# default is as above\n# thread_prefix adds the thread name as a prefix to the category\n# thread_suffix adds the thread name as a suffix to the category\n# Note that thread name is not included by default, as it requires extra processing.\n#\n# To change the logging format, define either 
log_format_type or log_format\n# If both are defined, the type takes precedence\n# Note that these properties cannot be defined using the -J or -D JMeter\n# command-line flags, as the format will have already been determined by then\n# However, they can be defined as JVM properties\n\n#Logging levels for the logging categories in JMeter.  Correct values are FATAL_ERROR, ERROR, WARN, INFO, and DEBUG\n# To set the log level for a package or individual class, use:\n# log_level.[package_name].[classname]=[PRIORITY_LEVEL]\n# But omit \"org.apache\" from the package name.  The classname is optional.  Further examples below.\n\nlog_level.jmeter=INFO\nlog_level.jmeter.junit=DEBUG\n#log_level.jmeter.control=DEBUG\n#log_level.jmeter.testbeans=DEBUG\n#log_level.jmeter.engine=DEBUG\n#log_level.jmeter.threads=DEBUG\n#log_level.jmeter.gui=WARN\n#log_level.jmeter.testelement=DEBUG\n#log_level.jmeter.util=WARN\n#log_level.jmeter.protocol.http=DEBUG\n# For CookieManager, AuthManager etc:\n#log_level.jmeter.protocol.http.control=DEBUG\n#log_level.jmeter.protocol.ftp=WARN\n#log_level.jmeter.protocol.jdbc=DEBUG\n#log_level.jmeter.protocol.java=WARN\n#log_level.jmeter.testelements.property=DEBUG\nlog_level.jorphan=INFO\n  \n\n#Log file for log messages.\n# You can specify a different log file for different categories via:\n# log_file.[category]=[filename]\n# category is equivalent to the package/class names described above\n\n# Combined log file (for jmeter and jorphan)\n#log_file=jmeter.log\n# To redirect logging to standard output, try the following:\n# (it will probably report an error, but output will be to stdout)\n#log_file=\n\n# Or define separate logs if required:\n#log_file.jorphan=jorphan.log\n#log_file.jmeter=jmeter.log\n\n# If the filename contains  paired single-quotes, then the name is processed\n# as a SimpleDateFormat format applied to the current date, for example:\n#log_file='jmeter_'yyyyMMddHHmmss'.tmp'\n\n# N.B. 
When JMeter starts, it sets the system property:\n#    org.apache.commons.logging.Log\n# to\n#    org.apache.commons.logging.impl.LogKitLogger\n# if not already set. This causes Apache and Commons HttpClient to use the same logging as JMeter\n\n# Further logging configuration\n# Excalibur logging provides the facility to configure logging using\n# configuration files written in XML. This allows for such features as\n# log file rotation which are not supported directly by JMeter.\n#\n# If such a file specified, it will be applied to the current logging\n# hierarchy when that has been created.\n# \n#log_config=logkit.xml\n\n#---------------------------------------------------------------------------\n# HTTP Java configuration\n#---------------------------------------------------------------------------\n\n# Number of connection retries performed by HTTP Java sampler before giving up\n#http.java.sampler.retries=10\n# 0 now means don't retry connection (in 2.3 and before it meant no tries at all!)\n\n#---------------------------------------------------------------------------\n# Commons HTTPClient configuration\n#---------------------------------------------------------------------------\n\n# define a properties file for overriding Commons HttpClient parameters\n# See: http://hc.apache.org/httpclient-3.x/preference-api.html\n# Uncomment this line if you put anything in httpclient.parameters file\n#httpclient.parameters.file=httpclient.parameters\n\n\n# define a properties file for overriding Apache HttpClient parameters\n# See: TBA\n# Uncomment this line if you put anything in hc.parameters file\n#hc.parameters.file=hc.parameters\n\n# Following properties apply to both Commons and Apache HttpClient\n\n# set the socket timeout (or use the parameter http.socket.timeout) \n# for AJP Sampler and HttpClient3 implementation.\n# Note for HttpClient3 implementation it is better to use GUI to set timeout \n# or use http.socket.timeout in httpclient.parameters\n# Value is in 
milliseconds\n#httpclient.timeout=0\n# 0 == no timeout\n\n# Set the http version (defaults to 1.1)\n#httpclient.version=1.0 (or use the parameter http.protocol.version)\n\n# Define characters per second > 0 to emulate slow connections\n#httpclient.socket.http.cps=0\n#httpclient.socket.https.cps=0\n\n#Enable loopback protocol\n#httpclient.loopback=true\n\n# Define the local host address to be used for multi-homed hosts\n#httpclient.localaddress=1.2.3.4\n\n# AuthManager Kerberos configuration\n# Name of application module used in jaas.conf\n#kerberos_jaas_application=JMeter  \n\n# Should ports be stripped from urls before constructing SPNs\n# for spnego authentication\n#kerberos.spnego.strip_port=true\n\n#         Sample logging levels for Commons HttpClient\n#\n# Commons HttpClient Logging information can be found at:\n# http://hc.apache.org/httpclient-3.x/logging.html\n\n# Note that full category names are used, i.e. must include the org.apache.\n# Info level produces no output:\n#log_level.org.apache.commons.httpclient=debug\n# Might be useful:\n#log_level.org.apache.commons.httpclient.Authenticator=trace \n\n# Show headers only\n#log_level.httpclient.wire.header=debug\n\n# Full wire debug produces a lot of output; consider using separate file:\n#log_level.httpclient.wire=debug\n#log_file.httpclient=httpclient.log\n\n\n#         Apache Commons HttpClient logging examples\n#\n# Enable header wire + context logging - Best for Debugging\n#log_level.org.apache.http=DEBUG\n#log_level.org.apache.http.wire=ERROR\n\n# Enable full wire + context logging\n#log_level.org.apache.http=DEBUG\n\n# Enable context logging for connection management\n#log_level.org.apache.http.impl.conn=DEBUG\n\n# Enable context logging for connection management / request execution\n#log_level.org.apache.http.impl.conn=DEBUG\n#log_level.org.apache.http.impl.client=DEBUG\n#log_level.org.apache.http.client=DEBUG\n\n#---------------------------------------------------------------------------\n# Apache 
HttpComponents HTTPClient configuration (HTTPClient4)\n#---------------------------------------------------------------------------\n\n# Number of retries to attempt (default 0)\n#httpclient4.retrycount=0\n\n# Idle connection timeout (ms) to apply if the server does not send Keep-Alive headers\n#httpclient4.idletimeout=0\n# Note: this is currently an experimental fix\n\n#---------------------------------------------------------------------------\n# Apache HttpComponents HTTPClient configuration (HTTPClient 3.1)\n#---------------------------------------------------------------------------\n\n# Number of retries to attempt (default 0)\n#httpclient3.retrycount=0\n\n#---------------------------------------------------------------------------\n# HTTP Cache Manager configuration\n#---------------------------------------------------------------------------\n#\n# Space or comma separated list of methods that can be cached\n#cacheable_methods=GET\n# N.B. This property is currently a temporary solution for Bug 56162\n\n# Since 2.12, JMeter does not create anymore a Sample Result with 204 response \n# code for a resource found in cache which is inline with what browser do.\n#cache_manager.cached_resource_mode=RETURN_NO_SAMPLE\n\n# You can choose between 3 modes:\n# RETURN_NO_SAMPLE (default)\n# RETURN_200_CACHE\n# RETURN_CUSTOM_STATUS\n\n# Those mode have the following behaviours:\n# RETURN_NO_SAMPLE : this mode returns no Sample Result, it has no additional configuration\n# RETURN_200_CACHE : this mode will return Sample Result with response code to 200 and response message to \"(ex cache)\", you can modify response message by setting \n# RETURN_200_CACHE.message=(ex cache)\n# RETURN_CUSTOM_STATUS : This mode lets you select what response code and message you want to return, if you use this mode you need to set those properties\n# RETURN_CUSTOM_STATUS.code=\n# RETURN_CUSTOM_STATUS.message=\n\n#---------------------------------------------------------------------------\n# 
Results file configuration\n#---------------------------------------------------------------------------\n\n# This section helps determine how result data will be saved.\n# The commented out values are the defaults.\n\n# legitimate values: xml, csv, db.  Only xml and csv are currently supported.\njmeter.save.saveservice.output_format=csv\n\n\n# true when field should be saved; false otherwise\n\n# assertion_results_failure_message only affects CSV output\n#jmeter.save.saveservice.assertion_results_failure_message=false\n#\n# legitimate values: none, first, all\n#jmeter.save.saveservice.assertion_results=none\n#\n#jmeter.save.saveservice.data_type=true\n#jmeter.save.saveservice.label=true\n#jmeter.save.saveservice.response_code=true\n# response_data is not currently supported for CSV output\n#jmeter.save.saveservice.response_data=false\n# Save ResponseData for failed samples\n#jmeter.save.saveservice.response_data.on_error=false\n#jmeter.save.saveservice.response_message=true\n#jmeter.save.saveservice.successful=true\n#jmeter.save.saveservice.thread_name=true\n#jmeter.save.saveservice.time=true\n#jmeter.save.saveservice.subresults=true\n#jmeter.save.saveservice.assertions=true\n#jmeter.save.saveservice.latency=true\n#jmeter.save.saveservice.connect_time=false\n#jmeter.save.saveservice.samplerData=false\n#jmeter.save.saveservice.responseHeaders=false\n#jmeter.save.saveservice.requestHeaders=false\n#jmeter.save.saveservice.encoding=false\n#jmeter.save.saveservice.bytes=true\n#jmeter.save.saveservice.url=false\n#jmeter.save.saveservice.filename=false\njmeter.save.saveservice.hostname=false\njmeter.save.saveservice.thread_counts=true\n#jmeter.save.saveservice.sample_count=false\n#jmeter.save.saveservice.idle_time=false\n\n# Timestamp format - this only affects CSV output files\n# legitimate values: none, ms, or a format suitable for SimpleDateFormat\n#jmeter.save.saveservice.timestamp_format=ms\n#jmeter.save.saveservice.timestamp_format=yyyy/MM/dd HH:mm:ss.SSS\n\n# For 
use with Comma-separated value (CSV) files or other formats\n# where the fields' values are separated by specified delimiters.\n# Default:\n#jmeter.save.saveservice.default_delimiter=,\n# For TAB, since JMeter 2.3 one can use:\n#jmeter.save.saveservice.default_delimiter=\\t\n\n# Only applies to CSV format files:\njmeter.save.saveservice.print_field_names=false\n\n# Optional list of JMeter variable names whose values are to be saved in the result data files.\n# Use commas to separate the names. For example:\n#sample_variables=SESSION_ID,REFERENCE\n# N.B. The current implementation saves the values in XML as attributes,\n# so the names must be valid XML names.\n# Versions of JMeter after 2.3.2 send the variable to all servers\n# to ensure that the correct data is available at the client.\n\n# Optional xml processing instruction for line 2 of the file:\n#jmeter.save.saveservice.xml_pi=<?xml-stylesheet type=\"text/xsl\" href=\"../extras/jmeter-results-detail-report_21.xsl\"?>\n\n# Prefix used to identify filenames that are relative to the current base\n#jmeter.save.saveservice.base_prefix=~/\n\n# AutoFlush on each line written in XML or CSV output\n# Setting this to true will result in less test results data loss in case of Crash\n# but with impact on performances, particularly for intensive tests (low or no pauses)\n# Since JMeter 2.10, this is false by default\n#jmeter.save.saveservice.autoflush=false\n\n#---------------------------------------------------------------------------\n# Settings that affect SampleResults\n#---------------------------------------------------------------------------\n\n# Save the start time stamp instead of the end\n# This also affects the timestamp stored in result files\nsampleresult.timestamp.start=true\n\n# Whether to use System.nanoTime() - otherwise only use System.currentTimeMillis()\n#sampleresult.useNanoTime=true\n\n# Use a background thread to calculate the nanoTime offset\n# Set this to <= 0 to disable the background 
thread\n#sampleresult.nanoThreadSleep=5000\n\n#---------------------------------------------------------------------------\n# Upgrade property\n#---------------------------------------------------------------------------\n\n# File that holds a record of name changes for backward compatibility issues\nupgrade_properties=/bin/upgrade.properties\n\n#---------------------------------------------------------------------------\n# JMeter Test Script recorder configuration\n#\n# N.B. The element was originally called the Proxy recorder, which is why the\n# properties have the prefix \"proxy\".\n#---------------------------------------------------------------------------\n\n# If the recorder detects a gap of at least 5s (default) between HTTP requests,\n# it assumes that the user has clicked a new URL\n#proxy.pause=5000\n\n# Add numeric prefix to Sampler names (default true)\n#proxy.number.requests=true\n\n# List of URL patterns that will be added to URL Patterns to exclude\n# Separate multiple lines with ;\n#proxy.excludes.suggested=.*\\\\.(bmp|css|js|gif|ico|jpe?g|png|swf|woff)\n\n# Change the default HTTP Sampler (currently HttpClient4)\n# Java:\n#jmeter.httpsampler=HTTPSampler\n#or\n#jmeter.httpsampler=Java\n#\n# Apache HTTPClient:\n#jmeter.httpsampler=HTTPSampler2\n#or\n#jmeter.httpsampler=HttpClient3.1\n#\n# HttpClient4.x\n#jmeter.httpsampler=HttpClient4\n\n# By default JMeter tries to be more lenient with RFC2616 redirects and allows\n# relative paths.\n# If you want to test strict conformance, set this value to true\n# When the property is true, JMeter follows http://tools.ietf.org/html/rfc3986#section-5.2\n#jmeter.httpclient.strict_rfc2616=false\n\n# Default content-type include filter to use\n#proxy.content_type_include=text/html|text/plain|text/xml\n# Default content-type exclude filter to use\n#proxy.content_type_exclude=image/.*|text/css|application/.*\n\n# Default headers to remove from Header Manager elements\n# (Cookie and Authorization are always 
removed)\n#proxy.headers.remove=If-Modified-Since,If-None-Match,Host\n\n# Binary content-type handling\n# These content-types will be handled by saving the request in a file:\n#proxy.binary.types=application/x-amf,application/x-java-serialized-object\n# The files will be saved in this directory:\n#proxy.binary.directory=user.dir\n# The files will be created with this file filesuffix:\n#proxy.binary.filesuffix=.binary\n\n#---------------------------------------------------------------------------\n# Test Script Recorder certificate configuration\n#---------------------------------------------------------------------------\n\n#proxy.cert.directory=<JMeter bin directory>\n#proxy.cert.file=proxyserver.jks\n#proxy.cert.type=JKS\n#proxy.cert.keystorepass=password\n#proxy.cert.keypassword=password\n#proxy.cert.factory=SunX509\n# define this property if you wish to use your own keystore\n#proxy.cert.alias=<none>\n# The default validity for certificates created by JMeter\n#proxy.cert.validity=7\n# Use dynamic key generation (if supported by JMeter/JVM)\n# If false, will revert to using a single key with no certificate\n#proxy.cert.dynamic_keys=true\n\n#---------------------------------------------------------------------------\n# Test Script Recorder miscellaneous configuration\n#---------------------------------------------------------------------------\n\n# Whether to attempt disabling of samples that resulted from redirects\n# where the generated samples use auto-redirection\n#proxy.redirect.disabling=true\n\n# SSL configuration\n#proxy.ssl.protocol=TLS\n\n#---------------------------------------------------------------------------\n# JMeter Proxy configuration\n#---------------------------------------------------------------------------\n# use command-line flags for user-name and password\n#http.proxyDomain=NTLM domain, if required by HTTPClient sampler\n\n#---------------------------------------------------------------------------\n# HTTPSampleResponse Parser 
configuration\n#---------------------------------------------------------------------------\n\n# Space-separated list of parser groups\nHTTPResponse.parsers=htmlParser wmlParser\n# for each parser, there should be a parser.types and a parser.className property\n\n#---------------------------------------------------------------------------\n# HTML Parser configuration\n#---------------------------------------------------------------------------\n\n# Define the HTML parser to be used.\n# Default parser:\n# This new parser (since 2.10) should perform better than all others\n# see https://issues.apache.org/bugzilla/show_bug.cgi?id=55632\n#htmlParser.className=org.apache.jmeter.protocol.http.parser.LagartoBasedHtmlParser\n\n# Other parsers:\n# Default parser before 2.10\n#htmlParser.className=org.apache.jmeter.protocol.http.parser.HtmlParserHTMLParser\n#htmlParser.className=org.apache.jmeter.protocol.http.parser.JTidyHTMLParser\n# Note that Regexp extractor may detect references that have been commented out.\n# In many cases it will work OK, but you should be aware that it may generate \n# additional references.\n#htmlParser.className=org.apache.jmeter.protocol.http.parser.RegexpHTMLParser\n# This parser is based on JSoup, it should be the most accurate but less performant\n# than LagartoBasedHtmlParser\n#htmlParser.className=org.apache.jmeter.protocol.http.parser.JsoupBasedHtmlParser\n\n#Used by HTTPSamplerBase to associate htmlParser with content types below \nhtmlParser.types=text/html application/xhtml+xml application/xml text/xml\n\n#---------------------------------------------------------------------------\n# WML Parser configuration\n#---------------------------------------------------------------------------\n\nwmlParser.className=org.apache.jmeter.protocol.http.parser.RegexpHTMLParser\n\n#Used by HTTPSamplerBase to associate wmlParser with content types below \nwmlParser.types=text/vnd.wap.wml 
\n\n#---------------------------------------------------------------------------\n# Remote batching configuration\n#---------------------------------------------------------------------------\n# How is Sample sender implementations configured:\n# - true (default) means client configuration will be used\n# - false means server configuration will be used\n#sample_sender_client_configured=true\n\n# Remote batching support\n# Since JMeter 2.9, default is MODE_STRIPPED_BATCH, which returns samples in\n# batch mode (every 100 samples or every minute by default)\n# Note also that MODE_STRIPPED_BATCH strips response data from SampleResult, so if you need it change to\n# another mode\n# Hold retains samples until end of test (may need lots of memory)\n# Batch returns samples in batches\n# Statistical returns sample summary statistics\n# hold_samples was originally defined as a separate property,\n# but can now also be defined using mode=Hold\n# mode can also be the class name of an implementation of org.apache.jmeter.samplers.SampleSender\n#mode=Standard\n#mode=Batch\n#mode=Hold\n#mode=Statistical\n#Set to true to key statistical samples on threadName rather than threadGroup\n#key_on_threadname=false\n#mode=Stripped\n#mode=StrippedBatch\n#mode=org.example.load.MySampleSender\n#\n#num_sample_threshold=100\n# Value is in milliseconds\n#time_threshold=60000\n#\n# Asynchronous sender; uses a queue and background worker process to return the samples\n#mode=Asynch\n# default queue size\n#asynch.batch.queue.size=100\n# Same as Asynch but strips response data from SampleResult\n#mode=StrippedAsynch\n#\n# DiskStore: as for Hold mode, but serialises the samples to disk, rather than saving in memory\n#mode=DiskStore\n# Same as DiskStore but strips response data from SampleResult\n#mode=StrippedDiskStore\n# Note: the mode is currently resolved on the client; \n# other properties (e.g. 
time_threshold) are resolved on the server.\n\n# To set the Monitor Health Visualiser buffer size, enter the desired value\n# monitor.buffer.size=800\n\n#---------------------------------------------------------------------------\n# JDBC Request configuration\n#---------------------------------------------------------------------------\n\n# Max number of PreparedStatements per Connection for PreparedStatement cache\n#jdbcsampler.maxopenpreparedstatements=100\n\n# String used to indicate a null value\n#jdbcsampler.nullmarker=]NULL[\n\n#---------------------------------------------------------------------------\n# OS Process Sampler configuration\n#---------------------------------------------------------------------------\n# Polling to see if process has finished its work, used when a timeout is configured on sampler\n#os_sampler.poll_for_timeout=100\n\n#---------------------------------------------------------------------------\n# TCP Sampler configuration\n#---------------------------------------------------------------------------\n\n# The default handler class\n#tcp.handler=TCPClientImpl\n#\n# eolByte = byte value for end of line\n# set this to a value outside the range -128 to +127 to skip eol checking\n#tcp.eolByte=1000\n#\n# TCP Charset, used by org.apache.jmeter.protocol.tcp.sampler.TCPClientImpl\n# default to Platform defaults charset as returned by Charset.defaultCharset().name()\n#tcp.charset=\n#\n# status.prefix and suffix = strings that enclose the status response code\n#tcp.status.prefix=Status=\n#tcp.status.suffix=.\n#\n# status.properties = property file to convert codes to messages\n#tcp.status.properties=mytestfiles/tcpstatus.properties\n\n# The length prefix used by LengthPrefixedBinaryTCPClientImpl implementation\n# defaults to 2 bytes.\n#tcp.binarylength.prefix.length=2\n\n#---------------------------------------------------------------------------\n# Summariser - Generate Summary Results - configuration (mainly applies to non-GUI 
mode)\n#---------------------------------------------------------------------------\n#\n# Define the following property to automatically start a summariser with that name\n# (applies to non-GUI mode only)\n#summariser.name=summary\n#\n# interval between summaries (in seconds) default 30 seconds\nsummariser.interval=15\n#\n# Write messages to log file\n#summariser.log=true\n#\n# Write messages to System.out\n#summariser.out=true\n\n\n#---------------------------------------------------------------------------\n# Aggregate Report and Aggregate Graph - configuration\n#---------------------------------------------------------------------------\n#\n# Percentiles to display in reports\n# Can be float value between 0 and 100\n# First percentile to display, defaults to 90%\n#aggregate_rpt_pct1=90\n# Second percentile to display, defaults to 95%\n#aggregate_rpt_pct2=95\n# Third percentile to display, defaults to 99%\n#aggregate_rpt_pct3=99\n\n#---------------------------------------------------------------------------\n# Aggregate Report and Aggregate Graph - configuration\n#---------------------------------------------------------------------------\n#\n# Backend metrics sliding window size for Percentiles, Min, Max\n#backend_metrics_window=100\n\n#---------------------------------------------------------------------------\n# BeanShell configuration\n#---------------------------------------------------------------------------\n\n# BeanShell Server properties\n#\n# Define the port number as non-zero to start the http server on that port\n#beanshell.server.port=9000\n# The telnet server will be started on the next port\n\n#\n# Define the server initialisation file\nbeanshell.server.file=../extras/startup.bsh\n\n#\n# Define a file to be processed at startup\n# This is processed using its own interpreter.\n#beanshell.init.file=\n\n#\n# Define the initialisation files for BeanShell Sampler, Function and other BeanShell elements\n# N.B. 
Beanshell test elements do not share interpreters.\n#      Each element in each thread has its own interpreter.\n#      This is retained between samples.\n#beanshell.sampler.init=BeanShellSampler.bshrc\n#beanshell.function.init=BeanShellFunction.bshrc\n#beanshell.assertion.init=BeanShellAssertion.bshrc\n#beanshell.listener.init=etc\n#beanshell.postprocessor.init=etc\n#beanshell.preprocessor.init=etc\n#beanshell.timer.init=etc\n\n# The file BeanShellListeners.bshrc contains sample definitions\n# of Test and Thread Listeners.\n\n#---------------------------------------------------------------------------\n# MailerModel configuration\n#---------------------------------------------------------------------------\n\n# Number of successful samples before a message is sent\n#mailer.successlimit=2\n#\n# Number of failed samples before a message is sent\n#mailer.failurelimit=2\n\n#---------------------------------------------------------------------------\n# CSVRead configuration\n#---------------------------------------------------------------------------\n\n# CSVRead delimiter setting (default \",\")\n# Make sure that there are no trailing spaces or tabs after the delimiter\n# characters, or these will be included in the list of valid delimiters\n#csvread.delimiter=,\n#csvread.delimiter=;\n#csvread.delimiter=!\n#csvread.delimiter=~\n# The following line has a tab after the =\n#csvread.delimiter= \n\n#---------------------------------------------------------------------------\n# __time() function configuration\n#\n# The properties below can be used to redefine the default formats\n#---------------------------------------------------------------------------\n#time.YMD=yyyyMMdd\n#time.HMS=HHmmss\n#time.YMDHMS=yyyyMMdd-HHmmss\n#time.USER1=\n#time.USER2=\n\n#---------------------------------------------------------------------------\n# CSV DataSet configuration\n#---------------------------------------------------------------------------\n\n# String to return at EOF (if recycle 
not used)\n#csvdataset.eofstring=<EOF>\n\n#---------------------------------------------------------------------------\n# LDAP Sampler configuration\n#---------------------------------------------------------------------------\n# Maximum number of search results returned by a search that will be sorted\n# to guarantee a stable ordering (if more results than this limit are returned\n# then no sorting is done). Set to 0 to turn off all sorting, in which case\n# \"Equals\" response assertions will be very likely to fail against search results.\n#\n#ldapsampler.max_sorted_results=1000\n \n# Number of characters to log for each of three sections (starting matching section, diff section,\n#   ending matching section where not all sections will appear for all diffs) diff display when an Equals\n#   assertion fails. So a value of 100 means a maximum of 300 characters of diff text will be displayed\n#   (+ a number of extra characters like \"...\" and \"[[[\"/\"]]]\" which are used to decorate it).\n#assertion.equals_section_diff_len=100\n# text written out to log to signify start/end of diff delta\n#assertion.equals_diff_delta_start=[[[\n#assertion.equals_diff_delta_end=]]]\n\n#---------------------------------------------------------------------------\n# Miscellaneous configuration\n#---------------------------------------------------------------------------\n\n# If defined, then start the mirror server on the port\n#mirror.server.port=8081\n\n# ORO PatternCacheLRU size\n#oro.patterncache.size=1000\n\n#TestBeanGui\n#\n#propertyEditorSearchPath=null\n\n# Turn expert mode on/off: expert mode will show expert-mode beans and properties\n#jmeter.expertMode=true\n\n# Maximum redirects to follow in a single sequence (default 5)\n#httpsampler.max_redirects=5\n# Maximum frame/iframe nesting depth (default 5)\n#httpsampler.max_frame_depth=5\n# Maximum await termination timeout (secs) when concurrent download embedded resources (default 
60)\n#httpsampler.await_termination_timeout=60\n# Revert to BUG 51939 behaviour (no separate container for embedded resources) by setting the following false:\n#httpsampler.separate.container=true\n\n# If embedded resources download fails due to missing resources or other reasons, if this property is true\n# Parent sample will not be marked as failed \nhttpsampler.ignore_failed_embedded_resources=true\n\n# The encoding to be used if none is provided (default ISO-8859-1)\n#sampleresult.default.encoding=ISO-8859-1\n\n# Network response size calculation method\n# Use real size: number of bytes for response body return by webserver\n# (i.e. the network bytes received for response)\n# if set to false, the (uncompressed) response data size will used (default before 2.5)\n# Include headers: add the headers size in real size\n#sampleresult.getbytes.body_real_size=true\n#sampleresult.getbytes.headers_size=true\n\n# CookieManager behaviour - should cookies with null/empty values be deleted?\n# Default is true. Use false to revert to original behaviour\n#CookieManager.delete_null_cookies=true\n\n# CookieManager behaviour - should variable cookies be allowed?\n# Default is true. Use false to revert to original behaviour\n#CookieManager.allow_variable_cookies=true\n\n# CookieManager behaviour - should Cookies be stored as variables?\n# Default is false\n#CookieManager.save.cookies=false\n\n# CookieManager behaviour - prefix to add to cookie name before storing it as a variable\n# Default is COOKIE_; to remove the prefix, define it as one or more spaces\n#CookieManager.name.prefix=\n \n# CookieManager behaviour - check received cookies are valid before storing them?\n# Default is true. Use false to revert to previous behaviour\n#CookieManager.check.cookies=true\n\n# (2.0.3) JMeterThread behaviour has been changed to set the started flag before\n# the controllers are initialised. This is so controllers can access variables earlier. 
\n# In case this causes problems, the previous behaviour can be restored by uncommenting\n# the following line.\n#jmeterthread.startearlier=false\n\n# (2.2.1) JMeterThread behaviour has changed so that PostProcessors are run in forward order\n# (as they appear in the test plan) rather than reverse order as previously.\n# Uncomment the following line to revert to the original behaviour\n#jmeterthread.reversePostProcessors=true\n\n# (2.2) StandardJMeterEngine behaviour has been changed to notify the listeners after\n# the running version is enabled. This is so they can access variables. \n# In case this causes problems, the previous behaviour can be restored by uncommenting\n# the following line.\n#jmeterengine.startlistenerslater=false\n\n# Number of milliseconds to wait for a thread to stop\n#jmeterengine.threadstop.wait=5000\n\n#Whether to invoke System.exit(0) in server exit code after stopping RMI\n#jmeterengine.remote.system.exit=false\n\n# Whether to call System.exit(1) on failure to stop threads in non-GUI mode.\n# This only takes effect if the test was explicitly requested to stop.\n# If this is disabled, it may be necessary to kill the JVM externally\n#jmeterengine.stopfail.system.exit=true\n\n# Whether to force call System.exit(0) at end of test in non-GUI mode, even if\n# there were no failures and the test was not explicitly asked to stop.\n# Without this, the JVM may never exit if there are other threads spawned by\n# the test which never exit.\n#jmeterengine.force.system.exit=false\n\n# How long to pause (in ms) in the daemon thread before reporting that the JVM has failed to exit.\n# If the value is <= 0, the JMeter does not start the daemon thread \n#jmeter.exit.check.pause=2000\n\n# If running non-GUI, then JMeter listens on the following port for a shutdown message.\n# To disable, set the port to 1000 or less.\n#jmeterengine.nongui.port=4445\n#\n# If the initial port is busy, keep trying until this port is reached\n# (to disable searching, set the 
value less than or equal to the .port property)\n#jmeterengine.nongui.maxport=4455\n\n# How often to check for shutdown during ramp-up (milliseconds)\n#jmeterthread.rampup.granularity=1000\n\n#Should JMeter expand the tree when loading a test plan?\n# default value is false since JMeter 2.7\n#onload.expandtree=false\n\n#JSyntaxTextArea configuration\n#jsyntaxtextarea.wrapstyleword=true\n#jsyntaxtextarea.linewrap=true\n#jsyntaxtextarea.codefolding=true\n# Set 0 to disable undo feature in JSyntaxTextArea\n#jsyntaxtextarea.maxundos=50\n\n# Set this to false to disable the use of JSyntaxTextArea for the Console Logger panel \n#loggerpanel.usejsyntaxtext=true\n\n# Maximum size of HTML page that can be displayed; default=200 * 1024\n# Set to 0 to disable the size check and display the whole response\n#view.results.tree.max_size=204800\n\n# Order of Renderers in View Results Tree\n# Note full class names should be used for non jmeter core renderers\n# For JMeter core renderers, class names start with . 
and are automatically\n# prefixed with org.apache.jmeter.visualizers\nview.results.tree.renderers_order=.RenderAsText,.RenderAsRegexp,.RenderAsCssJQuery,.RenderAsXPath,.RenderAsHTML,.RenderAsHTMLWithEmbedded,.RenderAsDocument,.RenderAsJSON,.RenderAsXML\n\n# Maximum size of Document that can be parsed by Tika engine; default=10 * 1024 * 1024 (10MB)\n# Set to 0 to disable the size check\n#document.max_size=0\n\n#JMS options\n# Enable the following property to stop JMS Point-to-Point Sampler from using\n# the properties java.naming.security.[principal|credentials] when creating the queue connection\n#JMSSampler.useSecurity.properties=false\n\n# Set the following value to true in order to skip the delete confirmation dialogue\n#confirm.delete.skip=false\n\n# Used by Webservice Sampler (SOAP)\n# Size of Document Cache\n#soap.document_cache=50\n\n# Used by JSR223 elements\n# Size of compiled scripts cache\n#jsr223.compiled_scripts_cache_size=100\n\n#---------------------------------------------------------------------------\n# Classpath configuration\n#---------------------------------------------------------------------------\n\n# List of paths (separated by ;) to search for additional JMeter plugin classes,\n# for example new GUI elements and samplers.\n# A path item can either be a jar file or a directory.\n# Any jar file in such a directory will be automatically included,\n# jar files in sub directories are ignored.\n# The given value is in addition to any jars found in the lib/ext directory.\n# Do not use this for utility or plugin dependency jars.\n#search_paths=/app1/lib;/app2/lib\n\n# List of paths that JMeter will search for utility and plugin dependency classes.\n# Use your platform path separator to separate multiple paths.\n# A path item can either be a jar file or a directory.\n# Any jar file in such a directory will be automatically included,\n# jar files in sub directories are ignored.\n# The given value is in addition to any jars found in the lib 
directory.\n# All entries will be added to the class path of the system class loader\n# and also to the path of the JMeter internal loader.\n# Paths with spaces may cause problems for the JVM\n#user.classpath=../classes;../lib;../app1/jar1.jar;../app2/jar2.jar\n\n# List of paths (separated by ;) that JMeter will search for utility\n# and plugin dependency classes.\n# A path item can either be a jar file or a directory.\n# Any jar file in such a directory will be automatically included,\n# jar files in sub directories are ignored.\n# The given value is in addition to any jars found in the lib directory\n# or given by the user.classpath property.\n# All entries will be added to the path of the JMeter internal loader only.\n# For plugin dependencies using plugin_dependency_paths should be preferred over\n# user.classpath.\n#plugin_dependency_paths=../dependencies/lib;../app1/jar1.jar;../app2/jar2.jar\n\n# Classpath finder\n# ================\n# The classpath finder currently needs to load every single JMeter class to find\n# the classes it needs.\n# For non-GUI mode, it's only necessary to scan for Function classes, but all classes\n# are still loaded.\n# All current Function classes include \".function.\" in their name,\n# and none include \".gui.\" in the name, so the number of unwanted classes loaded can be\n# reduced by checking for these. However, if a valid function class name does not match\n# these restrictions, it will not be loaded. 
If problems are encountered, then comment\n# or change the following properties:\nclassfinder.functions.contain=.functions.\nclassfinder.functions.notContain=.gui.\n\n#---------------------------------------------------------------------------\n# Additional property files to load\n#---------------------------------------------------------------------------\n\n# Should JMeter automatically load additional JMeter properties?\n# File name to look for (comment to disable)\nuser.properties=user.properties\n\n# Should JMeter automatically load additional system properties?\n# File name to look for (comment to disable)\nsystem.properties=system.properties\n\n# Comma separated list of files that contain reference to templates and their description\n# Path must be relative to jmeter root folder\n#template.files=/bin/templates/templates.xml\n"
  },
  {
    "path": "system.properties",
    "content": "# Sample system.properties file\n#\n##   Licensed to the Apache Software Foundation (ASF) under one or more\n##   contributor license agreements.  See the NOTICE file distributed with\n##   this work for additional information regarding copyright ownership.\n##   The ASF licenses this file to You under the Apache License, Version 2.0\n##   (the \"License\"); you may not use this file except in compliance with\n##   the License.  You may obtain a copy of the License at\n## \n##       http://www.apache.org/licenses/LICENSE-2.0\n## \n##   Unless required by applicable law or agreed to in writing, software\n##   distributed under the License is distributed on an \"AS IS\" BASIS,\n##   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n##   See the License for the specific language governing permissions and\n##   limitations under the License.\n\n#    Commons Logging properties\n#    Used by HttpComponents 4.x, see: \n#    http://hc.apache.org/httpcomponents-client-4.3.x/logging.html\n#\n# By default, Commons Logging is configured by JMeter to use the same logging system\n# as the main JMeter code; to configure it please see jmeter.properties.\n#\n# Uncomment to enable debugging of Commons Logging setup; may be useful if\n# implementation cannot be instantiated:\n#org.apache.commons.logging.diagnostics.dest=STDERR\n#\n# Uncomment to enable Commons Logging to use standard output\n#org.apache.commons.logging.Log=org.apache.commons.logging.impl.SimpleLog\n#org.apache.commons.logging.simplelog.showdatetime=true\n#\n# Uncomment the following two lines to generate basic debug logging for HC4.x\n#org.apache.commons.logging.simplelog.log.org.apache.http=DEBUG\n#org.apache.commons.logging.simplelog.log.org.apache.http.wire=ERROR\n\n# Java networking-related properties\n#\n# For details of Oracle Java network properties, see for example:\n# 
http://download.oracle.com/javase/1.5.0/docs/guide/net/properties.html\n#\n#java.net.preferIPv4Stack=false\n#java.net.preferIPv6Addresses=false\n#networkaddress.cache.ttl=-1\n#networkaddress.cache.negative.ttl=10\n\n#\n#\n# SSL properties (moved from jmeter.properties)\n#\n# See http://download.oracle.com/javase/1.5.0/docs/guide/security/jsse/JSSERefGuide.html#Customization\n# for information on the javax.ssl system properties\n\n# Truststore properties (trusted certificates)\n#javax.net.ssl.trustStore\n#javax.net.ssl.trustStorePassword\n#javax.net.ssl.trustStoreProvider\n#javax.net.ssl.trustStoreType [default = KeyStore.getDefaultType()]\n\n# Keystore properties (client certificates)\n# Location\n#javax.net.ssl.keyStore\n#\n#The password to your keystore\n#javax.net.ssl.keyStorePassword\n#\n#javax.net.ssl.keyStoreProvider\n#javax.net.ssl.keyStoreType [default = KeyStore.getDefaultType()]\n\n# SSL debugging:\n# See http://download.oracle.com/javase/1.5.0/docs/guide/security/jsse/JSSERefGuide.html#Debug\n#\n# javax.net.debug=help - generates the list below:\n#all            turn on all debugging\n#ssl            turn on ssl debugging\n#\n#The following can be used with ssl:\n#        record       enable per-record tracing\n#        handshake    print each handshake message\n#        keygen       print key generation data\n#        session      print session activity\n#        defaultctx   print default SSL initialization\n#        sslctx       print SSLContext tracing\n#        sessioncache print session cache tracing\n#        keymanager   print key manager tracing\n#        trustmanager print trust manager tracing\n#\n#        handshake debugging can be widened with:\n#        data         hex dump of each handshake message\n#        verbose      verbose handshake message printing\n#\n#        record debugging can be widened with:\n#        plaintext    hex dump of record plaintext\n#\n# 
Examples:\n#javax.net.debug=ssl\n#javax.net.debug=sslctx,session,sessioncache\n#\n#\n# We enable the following property to allow headers such as \"Host\" to be passed through.\n# See http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6996110\nsun.net.http.allowRestrictedHeaders=true\n\n#Uncomment for Kerberos authentication and edit the 2 config files to match your domains\n#With the following configuration krb5.conf and jaas.conf must be located in bin folder\n#You can modify these file paths to use absolute location\n#java.security.krb5.conf=krb5.conf\n#java.security.auth.login.config=jaas.conf\n\n# Location of keytool application\n# This property can be defined if JMeter cannot find the application automatically\n# It should not be necessary in most cases.\n#keytool.directory=<Java Home Directory>/bin\n"
  },
  {
    "path": "verify.sh",
    "content": "#!/bin/bash\n#\n# jmeter-ec2 - Install Script (Runs on remote ec2 server)\n#\n\nfunction install_jmeter_plugins() {\n    echo \"Installing plugins...\"\n    wget -q -O ~/JMeterPlugins-Extras.jar https://s3.amazonaws.com/jmeter-ec2/JMeterPlugins-Extras.jar\n    wget -q -O ~/JMeterPlugins-Standard.jar https://s3.amazonaws.com/jmeter-ec2/JMeterPlugins-Standard.jar\n    mv ~/JMeterPlugins*.jar ~/$JMETER_VERSION/lib/ext/\n}\n\nfunction install_java() {\n    echo \"Updating apt-get...\"\n    sudo apt-get -qqy update\n    echo \"Installing java...\"\n    sudo DEBIAN_FRONTEND=noninteractive apt-get -qqy install openjdk-7-jre\n    echo \"Java installed\"\n}\n\nfunction install_jmeter() {\n    # ------------------------------------------------\n    #      Decide where to download jmeter from\n    #\n    # Order of preference:\n    #   1. S3, if we have a copy of the file\n    #   2. Mirror, if the desired version is current\n    #   3. Archive, as a backup\n    # ------------------------------------------------\n    if [ $(curl -sI https://s3.amazonaws.com/jmeter-ec2/$JMETER_VERSION.tgz | grep -c \"403 Forbidden\") -eq \"0\" ] ; then\n        # We have a copy on S3 so use that\n        echo \"Downloading jmeter from S3...\"\n        wget -q -O ~/$JMETER_VERSION.tgz https://s3.amazonaws.com/jmeter-ec2/$JMETER_VERSION.tgz\n    elif [ $(echo $(curl -s 'http://www.apache.org/dist/jmeter/binaries/') | grep -c \"$JMETER_VERSION\") -gt \"0\" ] ; then\n        # Nothing found on S3 but this is the current version of jmeter so use the preferred mirror to download\n        echo \"downloading jmeter from a Mirror...\"\n        wget -q -O ~/$JMETER_VERSION.tgz \"http://www.apache.org/dyn/closer.cgi?filename=jmeter/binaries/$JMETER_VERSION.tgz&action=download\"\n    else\n        # Fall back to the archive server\n        echo \"Downloading jmeter from Apache Archive...\"\n        wget -q -O ~/$JMETER_VERSION.tgz 
http://archive.apache.org/dist/jmeter/binaries/$JMETER_VERSION.tgz\n    fi\n    # Untar downloaded file\n    echo \"Unpacking jmeter...\"\n    tar -xf ~/$JMETER_VERSION.tgz\n    # install jmeter-plugins [http://code.google.com/p/jmeter-plugins/]\n    install_jmeter_plugins\n    echo \"Jmeter installed\"\n}\n\nJMETER_VERSION=$1\ncd ~\n\n# Java\nif java -version 2>&1 >/dev/null | grep -q \"java version\" ; then\n    echo \"Java is already installed\"\nelse\n    install_java\nfi\n\n# JMeter\nif [ ! -d \"$JMETER_VERSION\" ] ; then\n    # install jmeter\n    install_jmeter\nelse\n    echo \"JMeter is already installed\"\nfi\n\n# Done\necho \"software installed\"\n"
  }
]