[
  {
    "path": ".github/workflows/sync.yml",
    "content": "name: Mirror to DUT DIMT\n\non: [ push, delete, create ]\n\njobs:\n  git-mirror:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Configure Private Key\n        env:\n          SSH_PRIVATE_KEY: ${{ secrets.PRIVATE_KEY }}\n        run: |\n          mkdir -p ~/.ssh\n          echo \"$SSH_PRIVATE_KEY\" > ~/.ssh/id_rsa\n          chmod 600 ~/.ssh/id_rsa\n          echo \"StrictHostKeyChecking no\" >> ~/.ssh/config\n      - name: Push Mirror\n        env:\n          SOURCE_REPO: 'https://github.com/JinyuanLiu-CV/TarDAL.git'\n          DESTINATION_REPO: 'git@github.com:dlut-dimt/TarDAL.git'\n        run: |\n          git clone --mirror \"$SOURCE_REPO\" && cd `basename \"$SOURCE_REPO\"`\n          git remote set-url --push origin \"$DESTINATION_REPO\"\n          git fetch -p origin\n          git for-each-ref --format 'delete %(refname)' refs/pull | git update-ref --stdin\n          git push --mirror\n"
  },
  {
    "path": ".gitignore",
    "content": "# project config file (contain sensitive: server information)\n.idea/*\n\n# fuse results (contain images that can be reproduced by given model parameters)\nruns/*\n\n# macOS finder file (contain sensitive: local username)\n**/.DS_Store\n\n# python cache\n**/__pycache__\n\n# experimental data\ndata/*\n!data/README.md\n\n# weights (update by release)\nweights/*\n\n# test files\n**/test/*\n\n# wandb\nwandb/*\n"
  },
  {
    "path": "CITATION.cff",
    "content": "@inproceedings{liu2022target,\n  title={Target-aware Dual Adversarial Learning and a Multi-scenario Multi-Modality Benchmark to Fuse Infrared and Visible for Object Detection},\n  author={Liu, Jinyuan and Fan, Xin and Huang, Zhanbo and Wu, Guanyao and Liu, Risheng and Zhong, Wei and Luo, Zhongxuan},\n  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},\n  pages={5802--5811},\n  year={2022}\n}\n"
  },
  {
    "path": "LICENSE",
    "content": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU General Public License is a free, copyleft license for\nsoftware and other kinds of works.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nthe GNU General Public License is intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.  We, the Free Software Foundation, use the\nGNU General Public License for most of our software; it applies also to\nany other work released this way by its authors.  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  To protect your rights, we need to prevent others from denying you\nthese rights or asking you to surrender the rights.  Therefore, you have\ncertain responsibilities if you distribute copies of the software, or if\nyou modify it: responsibilities to respect the freedom of others.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must pass on to the recipients the same\nfreedoms that you received.  You must make sure that they, too, receive\nor can get the source code.  And you must show them these terms so they\nknow their rights.\n\n  Developers that use the GNU GPL protect your rights with two steps:\n(1) assert copyright on the software, and (2) offer you this License\ngiving you legal permission to copy, distribute and/or modify it.\n\n  For the developers' and authors' protection, the GPL clearly explains\nthat there is no warranty for this free software.  For both users' and\nauthors' sake, the GPL requires that modified versions be marked as\nchanged, so that their problems will not be attributed erroneously to\nauthors of previous versions.\n\n  Some devices are designed to deny users access to install or run\nmodified versions of the software inside them, although the manufacturer\ncan do so.  This is fundamentally incompatible with the aim of\nprotecting users' freedom to change the software.  The systematic\npattern of such abuse occurs in the area of products for individuals to\nuse, which is precisely where it is most unacceptable.  Therefore, we\nhave designed this version of the GPL to prohibit the practice for those\nproducts.  If such problems arise substantially in other domains, we\nstand ready to extend this provision to those domains in future versions\nof the GPL, as needed to protect the freedom of users.\n\n  Finally, every program is threatened constantly by software patents.\nStates should not allow patents to restrict development and use of\nsoftware on general-purpose computers, but in those that do, we wish to\navoid the special danger that patents applied to a free program could\nmake it effectively proprietary.  
To prevent this, the GPL assures that\npatents cannot be used to render the program non-free.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  
A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. 
Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. 
Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  
A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  
You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  
Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  
The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Use with the GNU Affero General Public License.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU Affero General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the special requirements of the GNU Affero General Public License,\nsection 13, concerning interaction through a network will apply to the\ncombination as such.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. 
Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License\n    along with this program.  
If not, see <https://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n    <program>  Copyright (C) <year>  <name of author>\n    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<https://www.gnu.org/licenses/>.\n\n  The GNU General Public License does not permit incorporating your program\ninto proprietary programs.  If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.  But first, please read\n<https://www.gnu.org/licenses/why-not-lgpl.html>.\n"
  },
  {
    "path": "README.md",
    "content": "# TarDAL \n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JinyuanLiu-CV/TarDAL/blob/main/tutorial.ipynb)\n![visitors](https://visitor-badge.glitch.me/badge?page_id=JinyuanLiu-CV.TarDAL)\n\nJinyuan Liu, Xin Fan*, Zhangbo Huang, Guanyao Wu, Risheng Liu , Wei Zhong, Zhongxuan Luo,**“Target-aware Dual\nAdversarial Learning and a Multi-scenario Multi-Modality Benchmark to Fuse Infrared and Visible for Object Detection”**,\nIEEE/CVF Conference on Computer Vision and Pattern Recognition **(CVPR)**, 2022. **(Oral)**\n\n- [*[ArXiv]*](https://arxiv.org/abs/2203.16220v1)\n- [*[CVPR]*](https://openaccess.thecvf.com/content/CVPR2022/papers/Liu_Target-Aware_Dual_Adversarial_Learning_and_a_Multi-Scenario_Multi-Modality_Benchmark_To_CVPR_2022_paper.pdf)\n\n---\n\n![Abstract](assets/first_figure.jpg)\n\n---\n\n\n<h2> <p align=\"center\"> M3FD Dataset </p> </h2>  \n\n### Preview\n\nThe preview of our dataset is as follows.\n\n---\n\n![preview](assets/preview.png)\n![gif](assets/preview.gif)\n \n---\n\n### Details\n\n- **Sensor**: A synchronized system containing one binocular optical camera and one binocular infrared sensor. More\n  details are available in the paper.\n\n- **Main scene**:\n    - Campus of Dalian University of Technology.\n    - State Tourism Holiday Resort at the Golden Stone Beach in Dalian, China.\n    - Main roads in Jinzhou District, Dalian, China.\n\n- **Total number of images**:\n    - **8400** (for fusion, detection and fused-based detection)\n    - **600** (independent scene for fusion)\n\n- **Total number of image pairs**:\n    - **4200** (for fusion, detection and fused-based detection)\n    - **300** (independent scene for fusion)\n\n\n- **Format of images**: \n   - [Infrared] 24-bit grayscale bitmap\n   - [Visible]  24-bit color bitmap\n\n- **Image size**: **1024 x 768** pixels (mostly)\n\n- **Registration**: **All image pairs are registered.** The visible images are calibrated by using the internal\n  parameters of our synchronized system, and the infrared images are artificially distorted by homography matrix.\n\n- **Labeling**: **34407 labels** have been manually labeled, containing 6 kinds of targets: **{People, Car, Bus,\n  Motorcycle, Lamp, Truck}**. (Limited by manpower, some targets may be mismarked or missed. 
We would appreciate it if you\n  would point out wrong or missing labels to help us improve the dataset.)\n\n### Download\n\n- [Google Drive](https://drive.google.com/drive/folders/1H-oO7bgRuVFYDcMGvxstT1nmy0WF_Y_6?usp=sharing)\n- [Baidu Yun](https://pan.baidu.com/s/1GoJrrl_mn2HNQVDSUdPCrw?pwd=M3FD)\n\nIf you have any questions or suggestions about the dataset, please email [Guanyao Wu](mailto:rollingplainko@gmail.com)\nor [Jinyuan Liu](mailto:atlantis918@hotmail.com).\n\n<h2> <p align=\"center\"> TarDAL Fusion </p> </h2>\n\n### Baselines\n\nIn our experiments, we used the following **outstanding** works as our baselines.\n\n*Note: Sorted alphabetically*\n\n- [AUIF](https://ieeexplore.ieee.org/document/9416456) (IEEE TCSVT 2021)\n- [DDcGAN](https://github.com/hanna-xu/DDcGAN) (IJCAI 2019)\n- [Densefuse](https://github.com/hli1221/imagefusion_densefuse) (IEEE TIP 2019)\n- [DIDFuse](https://github.com/Zhaozixiang1228/IVIF-DIDFuse) (IJCAI 2020)\n- [FusionGAN](https://github.com/jiayi-ma/FusionGAN) (Information Fusion 2019)\n- [GANMcC](https://github.com/HaoZhang1018/GANMcC) (IEEE TIM 2021)\n- [MFEIF](https://github.com/JinyuanLiu-CV/MFEIF) (IEEE TCSVT 2021)\n- [RFN-Nest](https://github.com/hli1221/imagefusion-rfn-nest) (Information Fusion 2021)\n- [SDNet](https://github.com/HaoZhang1018/SDNet) (IJCV 2021)\n- [U2Fusion](https://github.com/hanna-xu/U2Fusion) (IEEE TPAMI 2020)\n\n### Quick Start\n\nIf you are simply curious about the results of the fusion task, we have prepared an online demonstration.\n\nTry our free online preview in [Colab](https://colab.research.google.com/github/JinyuanLiu-CV/TarDAL/blob/main/tutorial.ipynb).\n\n### Set Up on Your Own Machine\n\nIf you want to dive deeper or apply TarDAL on a larger scale, you can set it up on your own machine by following the steps below.\n\n#### Virtual Environment\n\nWe strongly recommend that you use Conda as a package manager.\n\n```shell\n# create virtual environment\nconda create -n tardal python=3.10\nconda activate tardal\n# install the pytorch version suitable for your platform yourself\n# install tardal requirements\npip install -r requirements.txt\n# install yolov5 requirements\npip install -r module/detect/requirements.txt\n```\n\n#### Data Preparation\n\nYou should organize the data in the following structure.\n\n```\nTarDAL ROOT\n├── data\n|   ├── m3fd\n|   |   ├── ir # infrared images\n|   |   ├── vi # visible images\n|   |   ├── labels # labels in txt format (yolo format)\n|   |   └── meta # meta data, includes: pred.txt, train.txt, val.txt\n|   ├── tno\n|   |   ├── ir # infrared images\n|   |   ├── vi # visible images\n|   |   └── meta # meta data, includes: pred.txt, train.txt, val.txt\n|   ├── roadscene\n|   └── ...\n```\n\nYou can directly download the TNO and RoadScene datasets, already organized in this format, from the links below.\n\n- [Google Drive](https://drive.google.com/drive/folders/1H-oO7bgRuVFYDcMGvxstT1nmy0WF_Y_6?usp=sharing)\n- [Baidu Yun](https://pan.baidu.com/s/1GoJrrl_mn2HNQVDSUdPCrw?pwd=M3FD)\n\n#### Fuse or Eval\n\nIn this section, we will guide you through generating fusion images with our pre-trained models.\n\nAs we mentioned in our paper, we provide three pre-trained models.\n\n| Name      | Description                                                     |\n|-----------|-----------------------------------------------------------------|\n| TarDAL-DT | Optimized for human vision. (Default)                           |\n| TarDAL-TT | Optimized for object detection.                                 
|\n| TarDAL-CT | Optimal solution for joint human vision and detection accuracy. |\n\nYou can find their corresponding configuration files in [config/official/infer](config/official/infer).\n\nSome settings you should pay attention to:\n\n* config.yaml\n    * `strategy`: save images (fuse) or save images & labels (fuse & detect)\n    * `dataset`: name & root\n    * `inference`: each item in the inference section\n* infer.py\n    * `--cfg`: config file path, such as `config/official/tardal-dt.yaml`\n    * `--save_dir`: result save folder\n\nUnder normal circumstances, you don't need to manually download the model parameters; our program will download them for you.\n\n```shell\n# TarDAL-DT\n# use official tardal-dt infer config and save images to runs/tardal-dt\npython infer.py --cfg config/official/tardal-dt.yaml --save_dir runs/tardal-dt\n# TarDAL-TT\n# use official tardal-tt infer config and save images to runs/tardal-tt\npython infer.py --cfg config/official/tardal-tt.yaml --save_dir runs/tardal-tt\n# TarDAL-CT\n# use official tardal-ct infer config and save images to runs/tardal-ct\npython infer.py --cfg config/official/tardal-ct.yaml --save_dir runs/tardal-ct\n```\n\n#### Train\n\nWe provide training scripts for you to train your own model.\n\nPlease note: the training code is only intended to assist in understanding the paper and is not recommended for direct application in\nproduction environments.\n\nUnlike previous versions of the code, you don't need to preprocess the data; we automatically calculate the IQA weights and masks.\n\n```shell\n# TarDAL-DT\npython train.py --cfg config/official/tardal-dt.yaml --auth $YOUR_WANDB_KEY\n# TarDAL-TT\npython train.py --cfg config/official/tardal-tt.yaml --auth $YOUR_WANDB_KEY\n# TarDAL-CT\npython train.py --cfg config/official/tardal-ct.yaml --auth $YOUR_WANDB_KEY\n```\n\nIf you want to base your approach on ours and extend it to a production environment, here are some additional suggestions.\n\n[Suggestion: a better training process for everyone.](assets/train_process.png)\n\n### Any Questions\n\nIf you have any other questions about the code, please email [Zhanbo Huang](mailto:zbhuang917@hotmail.com).\n\nDue to job changes, the previous address `zbhuang@mail.dlut.edu.cn` is no longer available.\n\n## Citation\n\nIf this work has been helpful to you, please feel free to cite our paper!\n\n```bibtex\n@inproceedings{liu2022target,\n  title={Target-aware Dual Adversarial Learning and a Multi-scenario Multi-Modality Benchmark to Fuse Infrared and Visible for Object Detection},\n  author={Liu, Jinyuan and Fan, Xin and Huang, Zhanbo and Wu, Guanyao and Liu, Risheng and Zhong, Wei and Luo, Zhongxuan},\n  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},\n  pages={5802--5811},\n  year={2022}\n}\n```\n"
  },
  {
    "path": "assets/sample/s1/meta/pred.txt",
    "content": "M3FD_00471.png\nROAD_040.jpg\nTNO_028.bmp\n"
  },
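  {
    "path": "assets/sample/s1/example_split.py",
    "content": "# Illustrative sketch, not part of the original repository: it shows how a\n# meta split file (such as the pred.txt above) pairs with the ir/ and vi/\n# folders in the data layout described in the README. The function and\n# variable names here are assumptions made for this example.\nfrom pathlib import Path\n\n\ndef load_split(root: str, split: str = 'pred'):\n    \"\"\"Yield (infrared, visible) image path pairs listed in meta/<split>.txt.\"\"\"\n    root = Path(root)\n    names = (root / 'meta' / f'{split}.txt').read_text().split()\n    for name in names:\n        yield root / 'ir' / name, root / 'vi' / name\n\n\nif __name__ == '__main__':\n    # run from the repository root\n    for ir_path, vi_path in load_split('assets/sample/s1'):\n        print(ir_path, vi_path)\n"
  },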
  {
    "path": "config/__init__.py",
    "content": "class ConfigDict(dict):\n    __setattr__ = dict.__setitem__\n    __getattr__ = dict.__getitem__\n\n\ndef from_dict(obj) -> ConfigDict:\n    if not isinstance(obj, dict):\n        return obj\n    d = ConfigDict()\n    for k, v in obj.items():\n        d[k] = from_dict(v)\n    return d\n"
  },
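  {
    "path": "config/example_usage.py",
    "content": "# Illustrative sketch, not part of the original repository: it shows how\n# `from_dict` and `ConfigDict` above are typically combined with PyYAML to\n# load one of the YAML configs with attribute-style access. The config path\n# and keys below exist in this repo; PyYAML is assumed to be installed.\nimport yaml\n\nfrom config import from_dict\n\n# run from the repository root so the relative path resolves\nwith open('config/default.yaml') as f:\n    cfg = from_dict(yaml.safe_load(f))\n\n# nested mappings become nested ConfigDicts, so keys read as attributes\nprint(cfg.device)            # cuda\nprint(cfg.train.batch_size)  # 16\nprint(cfg.loss.fuse.src_fn)  # v0\n"
  },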
  {
    "path": "config/default.yaml",
    "content": "# base settings\ndevice   : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...)\nsave_dir : 'cache' # folder used for saving the model, logs results\n\n# debug mode settings\ndebug    :\n  log       : INFO # log level\n  wandb_mode: 'online' # wandb connection mode\n  fast_run  : false # use a small subset of the dataset for debugging code\n\n# framework training strategy:\n#   backward method: fuse (direct training DT)\n#   backward method: detect (task-oriented training TT)\n#   backward method: fuse & detect (cooperative training CT)\nstrategy : fuse & detect\n\n# fuse network settings: core of infrared and visible fusion\nfuse     :\n  dim       : 32 # features base dimensions for generator and discriminator\n  depth     : 3 # depth of dense architecture\n  pretrained: weights/v1/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters\n\n# detect network settings: available if framework in joint mode (detect, fuse + detect)\ndetect   :\n  model     : yolov5s # yolo model (yolov5 n,s,m,l,x)\n  channels  : 3 # input channels (3: rgb or 1: grayscale)\n  pretrained: weights/v1/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters\n\n# saliency network settings: generating mask for training tardal\nsaliency :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth\n\n# iqa settings: information measurement\niqa      :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth\n\n# dataset settings:\n#   we provide four built-in representative datasets,\n#   if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue.\ndataset  :\n  name  : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.)\n  root  : data/m3fd # dataset root path\n  # only available for fuse & detect\n  detect:\n    hsv        : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing]\n    degrees    : 0 # image rotation (+/- degrees) [developing]\n    translate  : 0.1 # image translation (+/- fraction) [developing]\n    scale      : 0.9  # image scale (+/- gain) [developing]\n    shear      : 0.0  # image shear (+/- degrees) [developing]\n    perspective: 0.0  # image perspective (+/- fraction), range 0-0.001 [developing]\n    flip_ud    : 0.0  # image flip up-down (probability)\n    flip_lr    : 0.5  # image flip left-right (probability)\n\n# train settings:\ntrain    :\n  image_size   : [ 320, 320 ] # training image size in (h, w)\n  batch_size   : 16 # batch size used to train\n  num_workers  : 8 # number of workers used in data loading\n  epochs       : 300 # number of epochs to train\n  eval_interval: 1 # evaluation interval during training\n  save_interval: 5 # save interval during training\n  freeze       : [ ]  # freeze layers (e.g. 
backbone, head, ...)\n\n# inference settings:\ninference:\n  batch_size : 8 # batch size used for inference\n  num_workers: 8 # number of workers used in data loading\n  use_eval   : true # use eval mode during inference; default true, false for v0 weights.\n  grayscale  : false # ignore dataset settings, save as grayscale image\n  save_txt   : false # save label file\n\n# loss settings:\nloss     :\n  # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det\n  fuse  :\n    src_fn: v0 # v0: 0.01*ssim + 0.99*l1 | v1: ms-ssim\n    src   : 1  # src loss gain (v0: 0.8)\n    adv   : 0 # adv loss gain (v0: 0.2)\n    t_adv : 1 # target loss gain (v0: 1)\n    d_adv : 1 # detail loss gain (v0: 1)\n    d_mask: false # use mask for detail discriminator (v0: true)\n    d_warm: 1 # discriminator warmup epochs\n  # detect loss: box + cls + obj\n  detect:\n    box     : 0.05 # box loss gain\n    cls     : 0.3 # cls loss gain\n    cls_pw  : 1.0 # cls BCELoss positive weight\n    obj     : 0.7 # obj loss gain (scale with pixels)\n    obj_pw  : 1.0 # obj BCELoss positive weight\n    iou_t   : 0.20 # IoU training threshold\n    anchor_t: 4.0 # anchor-multiple threshold\n    fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)\n  # bridge\n  bridge:\n    fuse  : 0.5 # fuse loss gain for generator\n    detect: 0.5 # detect loss gain for generator\n    warm  : 2 # bridge warm up epochs (det -> det, fuse -> fuse)\n\n# optimizer settings:\noptimizer:\n  name        : sgd # optimizer name\n  lr_i        : 1.0e-2 # initial learning rate\n  lr_f        : 1.0e-1 # final learning rate (lr_i * lr_f)\n  momentum    : 0.937 # sgd momentum / adam beta1\n  weight_decay: 5.0e-4 # decay rate used in optimizer\n  lr_d        : 1.0e-4 # discriminator learning rate\n\n# scheduler settings:\nscheduler:\n  warmup_epochs  : [ 2.0, 3.0 ] # start-[0]: bridge warm (keep const), [0]-[1]: normal warm, [1]-end: normal decay\n  warmup_momentum: 0.8 # warmup initial momentum\n  warmup_bias_lr : 0.1 # warmup initial bias lr\n"
  },
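  {
    "path": "config/example_lr_schedule.py",
    "content": "# Illustrative sketch, not part of the original repository: a numeric reading\n# of the optimizer comments in config/default.yaml, whose `lr_f` comment says\n# the final learning rate is lr_i * lr_f. The linear-decay shape is an\n# assumption borrowed from YOLOv5-style hyperparameter conventions, which keys\n# like warmup_momentum and warmup_bias_lr appear to follow.\nlr_i, lr_f, epochs = 1.0e-2, 1.0e-1, 300  # values from config/default.yaml\n\n\ndef lr_at(epoch: int) -> float:\n    # interpolate linearly from lr_i down to lr_i * lr_f over training\n    frac = (1 - epoch / epochs) * (1 - lr_f) + lr_f\n    return lr_i * frac\n\n\nprint(lr_at(0))       # 0.01  (initial learning rate)\nprint(lr_at(epochs))  # 0.001 (final learning rate, lr_i * lr_f)\n"
  },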
  {
    "path": "config/exp/i-tardal-dt.yaml",
    "content": "# base settings\ndevice   : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...)\nsave_dir : 'cache' # folder used for saving the model, logs results\n\n# debug mode settings\ndebug    :\n  wandb_mode: 'online' # wandb connection mode\n  fast_run  : false # use a small subset of the dataset for debugging code\n\n# framework training strategy:\n#   backward method: fuse (direct training DT)\n#   backward method: detect (task-oriented training TT)\n#   backward method: fuse & detect (cooperative training CT)\nstrategy : fuse & detect\n\n# fuse network settings: core of infrared and visible fusion\nfuse     :\n  dim       : 32 # features base dimensions for generator and discriminator\n  depth     : 3 # depth of dense architecture\n  pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters\n\n# detect network settings: available if framework in joint mode (detect, fuse + detect)\ndetect   :\n  model     : yolov5s # yolo model (yolov5 n,s,m,l,x)\n  channels  : 3 # input channels (3: rgb or 1: grayscale)\n  pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters\n\n# saliency network settings: generating mask for training tardal\nsaliency :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth\n\n# iqa settings: information measurement\niqa      :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth\n\n# dataset settings:\n#   we provide four built-in representative datasets,\n#   if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue.\ndataset  :\n  name  : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.)\n  root  : data/m3fd # dataset root path\n  # only available for fuse & detect\n  detect:\n    hsv        : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing]\n    degrees    : 0 # image rotation (+/- degrees) [developing]\n    translate  : 0.1 # image translation (+/- fraction) [developing]\n    scale      : 0.9  # image scale (+/- gain) [developing]\n    shear      : 0.0  # image shear (+/- degrees) [developing]\n    perspective: 0.0  # image perspective (+/- fraction), range 0-0.001 [developing]\n    flip_ud    : 0.0  # image flip up-down (probability)\n    flip_lr    : 0.5  # image flip left-right (probability)\n\n# train settings:\ntrain    :\n  image_size   : [ 224, 224 ] # training image size in (h, w)\n  batch_size   : 32 # batch size used to train\n  num_workers  : 8 # number of workers used in data loading\n  epochs       : 1000 # number of epochs to train\n  eval_interval: 1 # evaluation interval during training\n  save_interval: 5 # save interval during training\n\n# inference settings:\ninference:\n  batch_size : 8 # batch size used to train\n  num_workers: 8 # number of workers used in data loading\n  use_eval   : ~ # use eval mode in inference mode, default true, false for v0 weights.\n  grayscale  : false # ignore dataset settings, save as grayscale image\n  save_txt   : false # save label file\n\n# loss settings:\nloss     :\n  # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det\n  fuse  :\n    src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim\n    src   : 0.8  # src loss gain (1 during v0)\n    adv   : 0.2 # adv loss gain (0.1 during v0)\n    t_adv : 0 # 
target loss gain\n    d_adv : 0 # detail loss gain\n    d_mask: false # use mask for detail discriminator (v0: true)\n    d_warm: 10 # discriminator warmup epochs\n  # detect loss: box + cls + obj\n  detect:\n    box     : 0.05 # box loss gain\n    cls     : 0.5 # cls loss gain\n    cls_pw  : 1.0 # cls BCELoss positive weight\n    obj     : 1.0 # obj loss gain (scale with pixels)\n    obj_pw  : 1.0 # obj BCELoss positive weight\n    iou_t   : 0.20 # IoU training threshold\n    anchor_t: 4.0 # anchor-multiple threshold\n    fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)\n  # bridge\n  bridge:\n    fuse  : 0.5 # fuse loss gain for generator\n    detect: 0.5 # detect loss gain for generator\n\n# optimizer settings:\noptimizer:\n  name        : adamw # optimizer name\n  lr_i        : 1.0e-3 # initial learning rate\n  lr_f        : 1.0e-3 # final learning rate\n  momentum    : 0.937 # adam beta1\n  weight_decay: 5.0e-4 # decay rate used in optimizer\n\n# scheduler settings:\nscheduler:\n  warmup_epochs  : 3.0 # warmup epochs\n  warmup_momentum: 0.8 # warmup initial momentum\n  warmup_bias_lr : 0.1 # warmup initial bias lr\n"
  },
  {
    "path": "config/exp/t-tardal-ct.yaml",
    "content": "# base settings\ndevice   : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...)\nsave_dir : 'cache' # folder used for saving the model, logs results\n\n# debug mode settings\ndebug    :\n  wandb_mode: 'online' # wandb connection mode\n  fast_run  : false # use a small subset of the dataset for debugging code\n\n# framework training strategy:\n#   backward method: fuse (direct training DT)\n#   backward method: detect (task-oriented training TT)\n#   backward method: fuse & detect (cooperative training CT)\nstrategy : fuse & detect\n\n# fuse network settings: core of infrared and visible fusion\nfuse     :\n  dim       : 32 # features base dimensions for generator and discriminator\n  depth     : 3 # depth of dense architecture\n  pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters\n\n# detect network settings: available if framework in joint mode (detect, fuse + detect)\ndetect   :\n  model     : yolov5s # yolo model (yolov5 n,s,m,l,x)\n  channels  : 3 # input channels (3: rgb or 1: grayscale)\n  pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters\n\n# saliency network settings: generating mask for training tardal\nsaliency :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth\n\n# iqa settings: information measurement\niqa      :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth\n\n# dataset settings:\n#   we provide four built-in representative datasets,\n#   if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue.\ndataset  :\n  name  : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.)\n  root  : data/m3fd # dataset root path\n  # only available for fuse & detect\n  detect:\n    hsv        : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing]\n    degrees    : 0 # image rotation (+/- degrees) [developing]\n    translate  : 0.1 # image translation (+/- fraction) [developing]\n    scale      : 0.9  # image scale (+/- gain) [developing]\n    shear      : 0.0  # image shear (+/- degrees) [developing]\n    perspective: 0.0  # image perspective (+/- fraction), range 0-0.001 [developing]\n    flip_ud    : 0.0  # image flip up-down (probability)\n    flip_lr    : 0.5  # image flip left-right (probability)\n\n# train settings:\ntrain    :\n  image_size   : [ 320, 320 ] # training image size in (h, w)\n  batch_size   : 16 # batch size used to train\n  num_workers  : 8 # number of workers used in data loading\n  epochs       : 1000 # number of epochs to train\n  eval_interval: 1 # evaluation interval during training\n  save_interval: 5 # save interval during training\n  freeze       : [ ]  # freeze layers (e.g. 
backbone, head, ...)\n\n# inference settings:\ninference:\n  batch_size : 8 # batch size used to train\n  num_workers: 8 # number of workers used in data loading\n  use_eval   : True # use eval mode in inference mode, default true, false for v0 weights.\n  grayscale  : false # ignore dataset settings, save as grayscale image\n  save_txt   : false # save label file\n\n# loss settings:\nloss     :\n  # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det\n  fuse  :\n    src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim\n    src   : 0.8  # src loss gain (1 during v0)\n    adv   : 0.2 # adv loss gain (0.1 during v0)\n    t_adv : 0 # target loss gain\n    d_adv : 0 # detail loss gain\n    d_mask: false # use mask for detail discriminator (v0: true)\n    d_warm: 10 # discriminator warmup epochs\n  # detect loss: box + cls + obj\n  detect:\n    box     : 0.05 # box loss gain\n    cls     : 0.5 # cls loss gain\n    cls_pw  : 1.0 # cls BCELoss positive weight\n    obj     : 1.0 # obj loss gain (scale with pixels)\n    obj_pw  : 1.0 # obj BCELoss positive weight\n    iou_t   : 0.20 # IoU training threshold\n    anchor_t: 4.0 # anchor-multiple threshold\n    fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)\n  # bridge\n  bridge:\n    fuse  : 0.5 # fuse loss gain for generator\n    detect: 0.5 # detect loss gain for generator\n\n# optimizer settings:\noptimizer:\n  name        : adamw # optimizer name\n  lr_i        : 1.0e-3 # initial learning rate\n  lr_f        : 1.0e-3 # final learning rate\n  momentum    : 0.937 # adam beta1\n  weight_decay: 5.0e-4 # decay rate used in optimizer\n\n# scheduler settings:\nscheduler:\n  warmup_epochs  : 3.0 # warmup epochs\n  warmup_momentum: 0.8 # warmup initial momentum\n  warmup_bias_lr : 0.1 # warmup initial bias lr\n"
  },
  {
    "path": "config/official/colab.yaml",
    "content": "# base settings\ndevice   : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...)\nsave_dir : 'cache' # folder used for saving the model, logs results\n\n# debug mode settings\ndebug    :\n  log       : INFO # log level\n  wandb_mode: 'offline' # wandb connection mode\n  fast_run  : false # use a small subset of the dataset for debugging code\n\n# framework training strategy:\n#   backward method: fuse (direct training DT)\n#   backward method: detect (task-oriented training TT)\n#   backward method: fuse & detect (cooperative training CT)\nstrategy : fuse\n\n# fuse network settings: core of infrared and visible fusion\nfuse     :\n  dim       : 32 # features base dimensions for generator and discriminator\n  depth     : 3 # depth of dense architecture\n  pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-ct.pth # ~: disable, path or url: load with pretrained parameters\n\n# detect network settings: available if framework in joint mode (detect, fuse + detect)\ndetect   :\n  model     : yolov5s # yolo model (yolov5 n,s,m,l,x)\n  channels  : 3 # input channels (3: rgb or 1: grayscale)\n  pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-ct.pth # ~: disable, path or url: load with pretrained parameters\n\n# saliency network settings: generating mask for training tardal\nsaliency :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth\n\n# iqa settings: information measurement\niqa      :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth\n\n# dataset settings:\n#   we provide four built-in representative datasets,\n#   if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue.\ndataset  :\n  name  : roadscene # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.)\n  root  : assets/sample/s1 # dataset root path\n  # only available for fuse & detect\n  detect:\n    hsv        : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing]\n    degrees    : 0 # image rotation (+/- degrees) [developing]\n    translate  : 0.1 # image translation (+/- fraction) [developing]\n    scale      : 0.9  # image scale (+/- gain) [developing]\n    shear      : 0.0  # image shear (+/- degrees) [developing]\n    perspective: 0.0  # image perspective (+/- fraction), range 0-0.001 [developing]\n    flip_ud    : 0.0  # image flip up-down (probability)\n    flip_lr    : 0.5  # image flip left-right (probability)\n\n# train settings:\ntrain    :\n  image_size   : [ 224, 224 ] # training image size in (h, w)\n  batch_size   : 32 # batch size used to train\n  num_workers  : 8 # number of workers used in data loading\n  epochs       : 1000 # number of epochs to train\n  eval_interval: 1 # evaluation interval during training\n  save_interval: 5 # save interval during training\n\n# inference settings:\ninference:\n  batch_size : 8 # batch size used to train\n  num_workers: 8 # number of workers used in data loading\n  use_eval   : ~ # use eval mode in inference mode, default true, false for v0 weights.\n  grayscale  : false # ignore dataset settings, save as grayscale image\n  save_txt   : false # save label file\n\n# loss settings:\nloss     :\n  # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det\n  fuse  :\n    src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim\n    src   : 0.8  # src loss gain (1 during v0)\n    adv   : 0.2 # adv loss gain 
(0.1 during v0)\n    t_adv : 0 # target loss gain\n    d_adv : 0 # detail loss gain\n    d_mask: false # use mask for detail discriminator (v0: true)\n    d_warm: 10 # discriminator warmup epochs\n  # detect loss: box + cls + obj\n  detect:\n    box     : 0.05 # box loss gain\n    cls     : 0.5 # cls loss gain\n    cls_pw  : 1.0 # cls BCELoss positive weight\n    obj     : 1.0 # obj loss gain (scale with pixels)\n    obj_pw  : 1.0 # obj BCELoss positive weight\n    iou_t   : 0.20 # IoU training threshold\n    anchor_t: 4.0 # anchor-multiple threshold\n    fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)\n  # bridge\n  bridge:\n    fuse  : 0.5 # fuse loss gain for generator\n    detect: 0.5 # detect loss gain for generator\n\n# optimizer settings:\noptimizer:\n  name        : adamw # optimizer name\n  lr_i        : 1.0e-3 # initial learning rate\n  lr_f        : 1.0e-3 # final learning rate\n  momentum    : 0.937 # adam beta1\n  weight_decay: 5.0e-4 # decay rate used in optimizer\n\n# scheduler settings:\nscheduler:\n  warmup_epochs  : 3.0 # warmup epochs\n  warmup_momentum: 0.8 # warmup initial momentum\n  warmup_bias_lr : 0.1 # warmup initial bias lr\n"
  },
  {
    "path": "config/official/infer/tardal-ct.yaml",
    "content": "# base settings\ndevice   : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...)\nsave_dir : 'cache' # folder used for saving the model, logs results\n\n# debug mode settings\ndebug    :\n  log       : INFO # log level\n  wandb_mode: 'online' # wandb connection mode\n  fast_run  : false # use a small subset of the dataset for debugging code\n\n# framework training strategy:\n#   backward method: fuse (direct training DT)\n#   backward method: detect (task-oriented training TT)\n#   backward method: fuse & detect (cooperative training CT)\nstrategy : fuse\n\n# fuse network settings: core of infrared and visible fusion\nfuse     :\n  dim       : 32 # features base dimensions for generator and discriminator\n  depth     : 3 # depth of dense architecture\n  pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-ct.pth # ~: disable, path or url: load with pretrained parameters\n\n# detect network settings: available if framework in joint mode (detect, fuse + detect)\ndetect   :\n  model     : yolov5s # yolo model (yolov5 n,s,m,l,x)\n  channels  : 3 # input channels (3: rgb or 1: grayscale)\n  pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-ct.pth # ~: disable, path or url: load with pretrained parameters\n\n# saliency network settings: generating mask for training tardal\nsaliency :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth\n\n# iqa settings: information measurement\niqa      :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth\n\n# dataset settings:\n#   we provide four built-in representative datasets,\n#   if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue.\ndataset  :\n  name  : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.)\n  root  : data/m3fd # dataset root path\n  # only available for fuse & detect\n  detect:\n    hsv        : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing]\n    degrees    : 0 # image rotation (+/- degrees) [developing]\n    translate  : 0.1 # image translation (+/- fraction) [developing]\n    scale      : 0.9  # image scale (+/- gain) [developing]\n    shear      : 0.0  # image shear (+/- degrees) [developing]\n    perspective: 0.0  # image perspective (+/- fraction), range 0-0.001 [developing]\n    flip_ud    : 0.0  # image flip up-down (probability)\n    flip_lr    : 0.5  # image flip left-right (probability)\n\n# train settings:\ntrain    :\n  image_size   : [ 224, 224 ] # training image size in (h, w)\n  batch_size   : 32 # batch size used to train\n  num_workers  : 8 # number of workers used in data loading\n  epochs       : 1000 # number of epochs to train\n  eval_interval: 1 # evaluation interval during training\n  save_interval: 5 # save interval during training\n\n# inference settings:\ninference:\n  batch_size : 8 # batch size used to train\n  num_workers: 8 # number of workers used in data loading\n  use_eval   : ~ # use eval mode in inference mode, default true, false for v0 weights.\n  grayscale  : false # ignore dataset settings, save as grayscale image\n  save_txt   : false # save label file\n\n# loss settings:\nloss     :\n  # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det\n  fuse  :\n    src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim\n    src   : 0.8  # src loss gain (1 during v0)\n    adv   : 0.2 # adv loss gain (0.1 during 
v0)\n    t_adv : 0 # target loss gain\n    d_adv : 0 # detail loss gain\n    d_mask: false # use mask for detail discriminator (v0: true)\n    d_warm: 10 # discriminator warmup epochs\n  # detect loss: box + cls + obj\n  detect:\n    box     : 0.05 # box loss gain\n    cls     : 0.5 # cls loss gain\n    cls_pw  : 1.0 # cls BCELoss positive weight\n    obj     : 1.0 # obj loss gain (scale with pixels)\n    obj_pw  : 1.0 # obj BCELoss positive weight\n    iou_t   : 0.20 # IoU training threshold\n    anchor_t: 4.0 # anchor-multiple threshold\n    fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)\n  # bridge\n  bridge:\n    fuse  : 0.5 # fuse loss gain for generator\n    detect: 0.5 # detect loss gain for generator\n\n# optimizer settings:\noptimizer:\n  name        : adamw # optimizer name\n  lr_i        : 1.0e-3 # initial learning rate\n  lr_f        : 1.0e-3 # final learning rate\n  momentum    : 0.937 # adam beta1\n  weight_decay: 5.0e-4 # decay rate used in optimizer\n\n# scheduler settings:\nscheduler:\n  warmup_epochs  : 3.0 # warmup epochs\n  warmup_momentum: 0.8 # warmup initial momentum\n  warmup_bias_lr : 0.1 # warmup initial bias lr\n"
  },
  {
    "path": "config/official/infer/tardal-dt.yaml",
    "content": "# base settings\ndevice   : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...)\nsave_dir : 'cache' # folder used for saving the model, logs results\n\n# debug mode settings\ndebug    :\n  log       : INFO # log level\n  wandb_mode: 'online' # wandb connection mode\n  fast_run  : false # use a small subset of the dataset for debugging code\n\n# framework training strategy:\n#   backward method: fuse (direct training DT)\n#   backward method: detect (task-oriented training TT)\n#   backward method: fuse & detect (cooperative training CT)\nstrategy : fuse\n\n# fuse network settings: core of infrared and visible fusion\nfuse     :\n  dim       : 32 # features base dimensions for generator and discriminator\n  depth     : 3 # depth of dense architecture\n  pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters\n\n# detect network settings: available if framework in joint mode (detect, fuse + detect)\ndetect   :\n  model     : yolov5s # yolo model (yolov5 n,s,m,l,x)\n  channels  : 3 # input channels (3: rgb or 1: grayscale)\n  pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters\n\n# saliency network settings: generating mask for training tardal\nsaliency :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth\n\n# iqa settings: information measurement\niqa      :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth\n\n# dataset settings:\n#   we provide four built-in representative datasets,\n#   if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue.\ndataset  :\n  name  : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.)\n  root  : data/m3fd # dataset root path\n  # only available for fuse & detect\n  detect:\n    hsv        : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing]\n    degrees    : 0 # image rotation (+/- degrees) [developing]\n    translate  : 0.1 # image translation (+/- fraction) [developing]\n    scale      : 0.9  # image scale (+/- gain) [developing]\n    shear      : 0.0  # image shear (+/- degrees) [developing]\n    perspective: 0.0  # image perspective (+/- fraction), range 0-0.001 [developing]\n    flip_ud    : 0.0  # image flip up-down (probability)\n    flip_lr    : 0.5  # image flip left-right (probability)\n\n# train settings:\ntrain    :\n  image_size   : [ 224, 224 ] # training image size in (h, w)\n  batch_size   : 32 # batch size used to train\n  num_workers  : 8 # number of workers used in data loading\n  epochs       : 1000 # number of epochs to train\n  eval_interval: 1 # evaluation interval during training\n  save_interval: 5 # save interval during training\n\n# inference settings:\ninference:\n  batch_size : 8 # batch size used to train\n  num_workers: 8 # number of workers used in data loading\n  use_eval   : ~ # use eval mode in inference mode, default true, false for v0 weights.\n  grayscale  : false # ignore dataset settings, save as grayscale image\n  save_txt   : false # save label file\n\n# loss settings:\nloss     :\n  # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det\n  fuse  :\n    src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim\n    src   : 0.8  # src loss gain (1 during v0)\n    adv   : 0.2 # adv loss gain (0.1 during 
v0)\n    t_adv : 0 # target loss gain\n    d_adv : 0 # detail loss gain\n    d_mask: false # use mask for detail discriminator (v0: true)\n    d_warm: 10 # discriminator warmup epochs\n  # detect loss: box + cls + obj\n  detect:\n    box     : 0.05 # box loss gain\n    cls     : 0.5 # cls loss gain\n    cls_pw  : 1.0 # cls BCELoss positive weight\n    obj     : 1.0 # obj loss gain (scale with pixels)\n    obj_pw  : 1.0 # obj BCELoss positive weight\n    iou_t   : 0.20 # IoU training threshold\n    anchor_t: 4.0 # anchor-multiple threshold\n    fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)\n  # bridge\n  bridge:\n    fuse  : 0.5 # fuse loss gain for generator\n    detect: 0.5 # detect loss gain for generator\n\n# optimizer settings:\noptimizer:\n  name        : adamw # optimizer name\n  lr_i        : 1.0e-3 # initial learning rate\n  lr_f        : 1.0e-3 # final learning rate\n  momentum    : 0.937 # adam beta1\n  weight_decay: 5.0e-4 # decay rate used in optimizer\n\n# scheduler settings:\nscheduler:\n  warmup_epochs  : 3.0 # warmup epochs\n  warmup_momentum: 0.8 # warmup initial momentum\n  warmup_bias_lr : 0.1 # warmup initial bias lr\n"
  },
  {
    "path": "config/official/infer/tardal-tt.yaml",
    "content": "# base settings\ndevice   : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...)\nsave_dir : 'cache' # folder used for saving the model, logs results\n\n# debug mode settings\ndebug    :\n  log       : INFO # log level\n  wandb_mode: 'online' # wandb connection mode\n  fast_run  : false # use a small subset of the dataset for debugging code\n\n# framework training strategy:\n#   backward method: fuse (direct training DT)\n#   backward method: detect (task-oriented training TT)\n#   backward method: fuse & detect (cooperative training CT)\nstrategy : fuse\n\n# fuse network settings: core of infrared and visible fusion\nfuse     :\n  dim       : 32 # features base dimensions for generator and discriminator\n  depth     : 3 # depth of dense architecture\n  pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-tt.pth # ~: disable, path or url: load with pretrained parameters\n\n# detect network settings: available if framework in joint mode (detect, fuse + detect)\ndetect   :\n  model     : yolov5s # yolo model (yolov5 n,s,m,l,x)\n  channels  : 3 # input channels (3: rgb or 1: grayscale)\n  pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-tt.pth # ~: disable, path or url: load with pretrained parameters\n\n# saliency network settings: generating mask for training tardal\nsaliency :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth\n\n# iqa settings: information measurement\niqa      :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth\n\n# dataset settings:\n#   we provide four built-in representative datasets,\n#   if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue.\ndataset  :\n  name  : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.)\n  root  : data/m3fd # dataset root path\n  # only available for fuse & detect\n  detect:\n    hsv        : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing]\n    degrees    : 0 # image rotation (+/- degrees) [developing]\n    translate  : 0.1 # image translation (+/- fraction) [developing]\n    scale      : 0.9  # image scale (+/- gain) [developing]\n    shear      : 0.0  # image shear (+/- degrees) [developing]\n    perspective: 0.0  # image perspective (+/- fraction), range 0-0.001 [developing]\n    flip_ud    : 0.0  # image flip up-down (probability)\n    flip_lr    : 0.5  # image flip left-right (probability)\n\n# train settings:\ntrain    :\n  image_size   : [ 224, 224 ] # training image size in (h, w)\n  batch_size   : 32 # batch size used to train\n  num_workers  : 12 # number of workers used in data loading\n  epochs       : 1000 # number of epochs to train\n  eval_interval: 5 # evaluation interval during training\n  save_interval: 5 # save interval during training\n\n# inference settings:\ninference:\n  batch_size : 8 # batch size used to train\n  num_workers: 12 # number of workers used in data loading\n  use_eval   : true # use eval mode in inference mode, default true, false for v0 weights.\n  grayscale  : false # ignore dataset settings, save as grayscale image\n\n# loss settings:\nloss     :\n  # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det\n  fuse  :\n    src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim\n    src   : 0.8  # src loss gain (1 during v0)\n    adv   : 0.2 # adv loss gain (0.1 during v0)\n    t_adv : 0.5 # target loss 
gain\n    d_adv : 0.5 # detail loss gain\n    det   : 1.0 # det loss gain (available only for detect or fuse+detect mode)\n    d_mask: false # use mask for detail discriminator (v0: true)\n    d_warm: 10 # discriminator warmup epochs\n  # detect loss: box + cls + obj\n  detect:\n    box     : 0.05 # box loss gain\n    cls     : 0.5 # cls loss gain\n    cls_pw  : 1.0 # cls BCELoss positive weight\n    obj     : 1.0 # obj loss gain (scale with pixels)\n    obj_pw  : 1.0 # obj BCELoss positive weight\n    iou_t   : 0.20 # IoU training threshold\n    anchor_t: 4.0 # anchor-multiple threshold\n    fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)\n  # bridge\n  bridge:\n    fuse  : 0.5 # fuse loss gain for generator\n    detect: 0.5 # detect loss gain for generator\n\n# optimizer settings:\noptimizer:\n  name        : adamw # optimizer name\n  lr_i        : 1.0e-3 # initial learning rate\n  lr_f        : 1.0e-3 # final learning rate\n  momentum    : 0.937 # adam beta1\n  weight_decay: 5.0e-4 # decay rate used in optimizer\n\n# scheduler settings:\nscheduler:\n  warmup_epochs  : 3.0 # warmup epochs\n  warmup_momentum: 0.8 # warmup initial momentum\n  warmup_bias_lr : 0.1 # warmup initial bias lr\n"
  },
  {
    "path": "config/official/train/tardal-ct.yaml",
    "content": "# base settings\ndevice   : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...)\nsave_dir : 'cache' # folder used for saving the model, logs results\n\n# debug mode settings\ndebug    :\n  log       : INFO # log level\n  wandb_mode: 'online' # wandb connection mode\n  fast_run  : false # use a small subset of the dataset for debugging code\n\n# framework training strategy:\n#   backward method: fuse (direct training DT)\n#   backward method: detect (task-oriented training TT)\n#   backward method: fuse & detect (cooperative training CT)\nstrategy : fuse & detect\n\n# fuse network settings: core of infrared and visible fusion\nfuse     :\n  dim       : 32 # features base dimensions for generator and discriminator\n  depth     : 3 # depth of dense architecture\n  pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters\n\n# detect network settings: available if framework in joint mode (detect, fuse + detect)\ndetect   :\n  model     : yolov5s # yolo model (yolov5 n,s,m,l,x)\n  channels  : 3 # input channels (3: rgb or 1: grayscale)\n  pretrained: ~ # ~: disable, path or url: load with pretrained parameters\n\n# saliency network settings: generating mask for training tardal\nsaliency :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth\n\n# iqa settings: information measurement\niqa      :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth\n\n# dataset settings:\n#   we provide four built-in representative datasets,\n#   if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue.\ndataset  :\n  name  : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.)\n  root  : data/m3fd # dataset root path\n  # only available for fuse & detect\n  detect:\n    hsv        : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing]\n    degrees    : 0 # image rotation (+/- degrees) [developing]\n    translate  : 0.1 # image translation (+/- fraction) [developing]\n    scale      : 0.9  # image scale (+/- gain) [developing]\n    shear      : 0.0  # image shear (+/- degrees) [developing]\n    perspective: 0.0  # image perspective (+/- fraction), range 0-0.001 [developing]\n    flip_ud    : 0.0  # image flip up-down (probability)\n    flip_lr    : 0.5  # image flip left-right (probability)\n\n# train settings:\ntrain    :\n  image_size   : [ 320, 320 ] # training image size in (h, w)\n  batch_size   : 16 # batch size used to train\n  num_workers  : 8 # number of workers used in data loading\n  epochs       : 300 # number of epochs to train\n  eval_interval: 1 # evaluation interval during training\n  save_interval: 5 # save interval during training\n  freeze       : [ ]  # freeze layers (e.g. 
backbone, head, ...)\n\n# inference settings:\ninference:\n  batch_size : 8 # batch size used to train\n  num_workers: 8 # number of workers used in data loading\n  use_eval   : true # use eval mode in inference mode, default true, false for v0 weights.\n  grayscale  : false # ignore dataset settings, save as grayscale image\n  save_txt   : false # save label file\n\n# loss settings:\nloss     :\n  # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det\n  fuse  :\n    src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim\n    src   : 0.8  # src loss gain (1 during v0)\n    adv   : 0.2 # adv loss gain (0.1 during v0)\n    t_adv : 0.5 # target loss gain\n    d_adv : 0.5 # detail loss gain\n    det   : 1.0 # det loss gain (available only for detect or fuse+detect mode)\n    d_mask: false # use mask for detail discriminator (v0: true)\n    d_warm: 10 # discriminator warmup epochs\n  # detect loss: box + cls + obj\n  detect:\n    box     : 0.05 # box loss gain\n    cls     : 0.3 # cls loss gain\n    cls_pw  : 1.0 # cls BCELoss positive weight\n    obj     : 0.7 # obj loss gain (scale with pixels)\n    obj_pw  : 1.0 # obj BCELoss positive weight\n    iou_t   : 0.20 # IoU training threshold\n    anchor_t: 4.0 # anchor-multiple threshold\n    fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)\n  # bridge\n  bridge:\n    fuse  : 0.5 # fuse loss gain for generator\n    detect: 0.5 # detect loss gain for generator\n    warm  : 2 # bridge warm up epochs (det -> det, fuse -> fuse)\n\n# optimizer settings:\noptimizer:\n  name        : sgd # optimizer name\n  lr_i        : 1.0e-2 # initial learning rate\n  lr_f        : 1.0e-1 # final learning rate (lr_i * lr_f)\n  momentum    : 0.937 # adam beta1\n  weight_decay: 5.0e-4 # decay rate used in optimizer\n  lr_d        : 1.0e-4 # discriminator learning rate\n\n# scheduler settings:\nscheduler:\n  warmup_epochs  : [ 2.0, 3.0 ] # start-[0]: bridge warm (keep const), [0]-[1]: normal warm, [1]-end: normal decay\n  warmup_momentum: 0.8 # warmup initial momentum\n  warmup_bias_lr : 0.1 # warmup initial bias lr\n"
  },
  {
    "path": "config/official/train/tardal-dt.yaml",
    "content": "# base settings\ndevice   : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...)\nsave_dir : 'cache' # folder used for saving the model, logs results\n\n# debug mode settings\ndebug    :\n  log       : INFO # log level\n  wandb_mode: 'online' # wandb connection mode\n  fast_run  : false # use a small subset of the dataset for debugging code\n\n# framework training strategy:\n#   backward method: fuse (direct training DT)\n#   backward method: detect (task-oriented training TT)\n#   backward method: fuse & detect (cooperative training CT)\nstrategy : fuse\n\n# fuse network settings: core of infrared and visible fusion\nfuse     :\n  dim       : 32 # features base dimensions for generator and discriminator\n  depth     : 3 # depth of dense architecture\n  pretrained: ~ # ~: disable, path or url: load with pretrained parameters\n\n# detect network settings: available if framework in joint mode (detect, fuse + detect)\ndetect   :\n  model     : yolov5s # yolo model (yolov5 n,s,m,l,x)\n  channels  : 3 # input channels (3: rgb or 1: grayscale)\n  pretrained: ~ # ~: disable, path or url: load with pretrained parameters\n\n# saliency network settings: generating mask for training tardal\nsaliency :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth\n\n# iqa settings: information measurement\niqa      :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth\n\n# dataset settings:\n#   we provide four built-in representative datasets,\n#   if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue.\ndataset  :\n  name  : RoadScene # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.)\n  root  : data/roadscene # dataset root path\n  # only available for fuse & detect\n  detect:\n    hsv        : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing]\n    degrees    : 0 # image rotation (+/- degrees) [developing]\n    translate  : 0.1 # image translation (+/- fraction) [developing]\n    scale      : 0.9  # image scale (+/- gain) [developing]\n    shear      : 0.0  # image shear (+/- degrees) [developing]\n    perspective: 0.0  # image perspective (+/- fraction), range 0-0.001 [developing]\n    flip_ud    : 0.0  # image flip up-down (probability)\n    flip_lr    : 0.5  # image flip left-right (probability)\n\n# train settings:\ntrain    :\n  image_size   : [ 224, 224 ] # training image size in (h, w)\n  batch_size   : 32 # batch size used to train\n  num_workers  : 12 # number of workers used in data loading\n  epochs       : 1000 # number of epochs to train\n  eval_interval: 5 # evaluation interval during training\n  save_interval: 5 # save interval during training\n  freeze       : [ ]  # freeze layers (e.g. 
backbone, head, ...)\n\n# inference settings:\ninference:\n  batch_size : 8 # batch size used to train\n  num_workers: 12 # number of workers used in data loading\n  use_eval   : true # use eval mode in inference mode, default true, false for v0 weights.\n  grayscale  : false # ignore dataset settings, save as grayscale image\n\n# loss settings:\nloss     :\n  # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det\n  fuse  :\n    src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim\n    src   : 0.8  # src loss gain (1 during v0)\n    adv   : 0.2 # adv loss gain (0.1 during v0)\n    t_adv : 0.5 # target loss gain\n    d_adv : 0.5 # detail loss gain\n    det   : 1.0 # det loss gain (available only for detect or fuse+detect mode)\n    d_mask: false # use mask for detail discriminator (v0: true)\n    d_warm: 10 # discriminator warmup epochs\n  # detect loss: box + cls + obj\n  detect:\n    box     : 0.05 # box loss gain\n    cls     : 0.5 # cls loss gain\n    cls_pw  : 1.0 # cls BCELoss positive weight\n    obj     : 1.0 # obj loss gain (scale with pixels)\n    obj_pw  : 1.0 # obj BCELoss positive weight\n    iou_t   : 0.20 # IoU training threshold\n    anchor_t: 4.0 # anchor-multiple threshold\n    fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)\n  # bridge\n  bridge:\n    fuse  : 0.5 # fuse loss gain for generator\n    detect: 0.5 # detect loss gain for generator\n\n# optimizer settings:\noptimizer:\n  name        : adamw # optimizer name\n  lr_i        : 1.0e-3 # initial learning rate\n  lr_f        : 1.0e-3 # final learning rate\n  momentum    : 0.937 # adam beta1\n  weight_decay: 5.0e-4 # decay rate used in optimizer\n\n# scheduler settings:\nscheduler:\n  warmup_epochs  : 3.0 # warmup epochs\n  warmup_momentum: 0.8 # warmup initial momentum\n  warmup_bias_lr : 0.1 # warmup initial bias lr\n"
  },
  {
    "path": "config/official/train/tardal-tt.yaml",
    "content": "# base settings\ndevice   : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...)\nsave_dir : 'cache' # folder used for saving the model, logs results\n\n# debug mode settings\ndebug    :\n  log       : INFO # log level\n  wandb_mode: 'online' # wandb connection mode\n  fast_run  : false # use a small subset of the dataset for debugging code\n\n# framework training strategy:\n#   backward method: fuse (direct training DT)\n#   backward method: detect (task-oriented training TT)\n#   backward method: fuse & detect (cooperative training CT)\nstrategy : detect\n\n# fuse network settings: core of infrared and visible fusion\nfuse     :\n  dim       : 32 # features base dimensions for generator and discriminator\n  depth     : 3 # depth of dense architecture\n  pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters\n\n# detect network settings: available if framework in joint mode (detect, fuse + detect)\ndetect   :\n  model     : yolov5s # yolo model (yolov5 n,s,m,l,x)\n  channels  : 3 # input channels (3: rgb or 1: grayscale)\n  pretrained: ~ # ~: disable, path or url: load with pretrained parameters\n\n# saliency network settings: generating mask for training tardal\nsaliency :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth\n\n# iqa settings: information measurement\niqa      :\n  url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth\n\n# dataset settings:\n#   we provide four built-in representative datasets,\n#   if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue.\ndataset  :\n  name  : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.)\n  root  : data/m3fd # dataset root path\n  # only available for fuse & detect\n  detect:\n    hsv        : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing]\n    degrees    : 0 # image rotation (+/- degrees) [developing]\n    translate  : 0.1 # image translation (+/- fraction) [developing]\n    scale      : 0.9  # image scale (+/- gain) [developing]\n    shear      : 0.0  # image shear (+/- degrees) [developing]\n    perspective: 0.0  # image perspective (+/- fraction), range 0-0.001 [developing]\n    flip_ud    : 0.0  # image flip up-down (probability)\n    flip_lr    : 0.5  # image flip left-right (probability)\n\n# train settings:\ntrain    :\n  image_size   : [ 320, 320 ] # training image size in (h, w)\n  batch_size   : 16 # batch size used to train\n  num_workers  : 8 # number of workers used in data loading\n  epochs       : 300 # number of epochs to train\n  eval_interval: 1 # evaluation interval during training\n  save_interval: 5 # save interval during training\n  freeze       : [ ]  # freeze layers (e.g. 
backbone, head, ...)\n\n# inference settings:\ninference:\n  batch_size : 8 # batch size used to train\n  num_workers: 8 # number of workers used in data loading\n  use_eval   : true # use eval mode in inference mode, default true, false for v0 weights.\n  grayscale  : false # ignore dataset settings, save as grayscale image\n\n# loss settings:\nloss     :\n  # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det\n  fuse  :\n    src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim\n    src   : 0.8  # src loss gain (1 during v0)\n    adv   : 0.2 # adv loss gain (0.1 during v0)\n    t_adv : 0.5 # target loss gain\n    d_adv : 0.5 # detail loss gain\n    det   : 1.0 # det loss gain (available only for detect or fuse+detect mode)\n    d_mask: false # use mask for detail discriminator (v0: true)\n    d_warm: 10 # discriminator warmup epochs\n  # detect loss: box + cls + obj\n  detect:\n    box     : 0.05 # box loss gain\n    cls     : 0.3 # cls loss gain\n    cls_pw  : 1.0 # cls BCELoss positive weight\n    obj     : 0.7 # obj loss gain (scale with pixels)\n    obj_pw  : 1.0 # obj BCELoss positive weight\n    iou_t   : 0.20 # IoU training threshold\n    anchor_t: 4.0 # anchor-multiple threshold\n    fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)\n  # bridge\n  bridge:\n    fuse  : 0.5 # fuse loss gain for generator\n    detect: 0.5 # detect loss gain for generator\n    warm  : 2 # bridge warm up epochs (det -> det, fuse -> fuse)\n\n# optimizer settings:\noptimizer:\n  name        : sgd # optimizer name\n  lr_i        : 1.0e-2 # initial learning rate\n  lr_f        : 1.0e-1 # final learning rate (lr_i * lr_f)\n  momentum    : 0.937 # adam beta1\n  weight_decay: 5.0e-4 # decay rate used in optimizer\n  lr_d        : 1.0e-4 # discriminator learning rate\n\n# scheduler settings:\nscheduler:\n  warmup_epochs  : [ 2.0, 3.0 ] # start-[0]: bridge warm (keep const), [0]-[1]: normal warm, [1]-end: normal decay\n  warmup_momentum: 0.8 # warmup initial momentum\n  warmup_bias_lr : 0.1 # warmup initial bias lr\n"
  },
  {
    "path": "data/README.md",
    "content": "# Dataset Configure Reference \n\n## Official Supported Datasets\n\n* TNO: fuse\n* RoadScene: fuse\n* MultiSpectral: fuse + detect\n* M3FD: fuse + detect\n\n## Other Datasets\n\nYou can write scripts for your own custom dataset in `loader/{$NAME}.py`, and raise a pull request (optional).\n\n## Prepare\n\nDatasets should have the following structure:\n\n```\ndata\n|__ TNO // name of the dataset\n    |__ ir // infrared images\n    |__ vi // visible images\n    |__ meta // dataset meta information\n        |__ train.txt // image name for training\n        |__ val.txt // image name for validation\n|__ M3FD // name of the dataset\n    |__ ir // infrared images\n    |__ vi // visible images\n    |__ labels // object labels (ground truth, cxcywh)\n    |__ meta // dataset meta information\n        |__ train.txt // image name for training\n        |__ val.txt // image name for validation\n```\n"
  },
  {
    "path": "functions/__init__.py",
    "content": ""
  },
  {
    "path": "functions/div_loss.py",
    "content": "import logging\n\nimport torch\nimport torch.autograd as autograd\n\n\ndef div_loss(disc, real_x, fake_x, wp: int = 6, eps: float = 1e-6):\n    logging.debug(f'calculating div: real {real_x.mean():.2f}, fake {fake_x.mean():.2f}')\n    alpha = torch.rand((real_x.shape[0], 1, 1, 1)).cuda()\n    tmp_x = (alpha * real_x + (1 - alpha) * fake_x).requires_grad_(True)\n    tmp_y = disc(tmp_x)\n    grad = autograd.grad(\n        outputs=tmp_y,\n        inputs=tmp_x,\n        grad_outputs=torch.ones_like(tmp_y),\n        create_graph=True,\n        retain_graph=True,\n        only_inputs=True,\n    )[0]\n    grad = grad.view(tmp_x.shape[0], -1) + eps\n    div = (grad.norm(2, dim=1) ** wp).mean()\n    return div\n"
  },
  {
    "path": "functions/get_param_groups.py",
    "content": "from typing import List\n\nfrom torch import nn\n\n\ndef get_param_groups(module) -> tuple[List, List, List]:\n    group = [], [], []\n    bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # normalization layers\n    for v in module.modules():\n        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):\n            \"bias\"\n            group[2].append(v.bias)\n        if isinstance(v, bn):\n            \"weight (no decay)\"\n            group[1].append(v.weight)\n        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):\n            \"weight (with decay)\"\n            group[0].append(v.weight)\n    return group\n"
  },
  {
    "path": "infer.py",
    "content": "import argparse\nimport logging\nfrom pathlib import Path\n\nimport torch.backends.cudnn\nimport yaml\n\nimport scripts\nfrom config import from_dict\n\nif __name__ == '__main__':\n    # args parser\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', default='config/default.yaml', help='config file path')\n    parser.add_argument('--save_dir', default='runs/tmp', help='fusion result save folder')\n    args = parser.parse_args()\n\n    # init config\n    config = yaml.safe_load(Path(args.cfg).open('r'))\n    config = from_dict(config)  # convert dict to object\n    config = config\n\n    # init logger\n    log_f = '%(asctime)s | %(filename)s[line:%(lineno)d] | %(levelname)s | %(message)s'\n    logging.basicConfig(level=config.debug.log, format=log_f)\n\n    # init device & anomaly detector\n    torch.backends.cudnn.benchmark = True\n    torch.autograd.set_detect_anomaly(True)\n\n    # choose inference script\n    logging.info(f'enter {config.strategy} inference mode')\n    match config.strategy:\n        case 'fuse':\n            infer_p = getattr(scripts, 'InferF')\n            # check pretrained weights\n            if config.fuse.pretrained is None:\n                logging.warning('no pretrained weights specified, use official pretrained weights')\n                config.fuse.pretrained = 'https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth'\n        case 'fuse & detect':\n            infer_p = getattr(scripts, 'InferFD')\n            # check pretrained weights\n            if config.fuse.pretrained is None:\n                logging.warning('no pretrained weights specified, use official pretrained weights')\n                config.fuse.pretrained = 'https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-ct.pth'\n        case 'detect':\n            raise NotImplementedError('detect mode is useless during inference period, please use fuse & detect mode')\n        case _:\n            raise ValueError(f'unknown strategy: {config.strategy}')\n\n    # create script instance\n    infer = infer_p(config, args.save_dir)\n    infer.run()\n"
  },
  {
    "path": "loader/__init__.py",
    "content": "from loader.m3fd import M3FD\nfrom loader.roadscene import RoadScene\nfrom loader.tno import TNO\n\n__all__ = ['TNO', 'RoadScene', 'M3FD']\n"
  },
  {
    "path": "loader/m3fd.py",
    "content": "import logging\nimport random\nfrom pathlib import Path\nfrom typing import Literal, List, Optional\n\nimport torch\nfrom kornia.geometry import vflip, hflip, resize\nfrom torch import Tensor, Size\nfrom torch.utils.data import Dataset\nfrom torchvision.ops import box_convert\nfrom torchvision.transforms import Resize\nfrom torchvision.utils import draw_bounding_boxes\n\nfrom config import ConfigDict\nfrom loader.utils.checker import check_mask, check_image, check_labels, check_iqa, get_max_size\nfrom loader.utils.reader import gray_read, ycbcr_read, label_read, img_write, label_write\nfrom tools.scenario_reader import scenario_counter, generate_meta\n\n\nclass M3FD(Dataset):\n    type = 'fuse & detect'  # dataset type: 'fuse' or 'fuse & detect'\n    color = True  # dataset visible format: false -> 'gray' or true -> 'color'\n    classes = ['People', 'Car', 'Bus', 'Lamp', 'Motorcycle', 'Truck']\n    palette = ['#FF0000', '#C1C337', '#2FA7B4', '#F541C4', '#F84F2C', '#7D2CC8']\n\n    generate_meta_lock = False  # generate meta once\n\n    def __init__(self, root: str | Path, mode: Literal['train', 'val', 'pred'], config: ConfigDict):\n        super().__init__()\n        root = Path(root)\n        self.root = root\n        self.mode = mode\n        self.config = config\n\n        # check json meta config\n        if M3FD.generate_meta_lock is False:\n            if Path(root / 'meta' / 'scenario.json').exists():\n                logging.info('found scenario.json, generating train & val list.')\n                scenario_counter(root / 'meta' / 'scenario.json')\n                generate_meta(root)\n                M3FD.generate_meta_lock = True\n            else:\n                logging.warning('not found scenario.json, using current train & val list.')\n\n        # read corresponding list\n        img_list = Path(root / 'meta' / f'{mode}.txt').read_text().splitlines()\n        logging.info(f'load {len(img_list)} images from {root.name}')\n        self.img_list = img_list\n\n        # check images\n        check_image(root, img_list)\n\n        # check labels\n        self.labels = check_labels(root, img_list)\n\n        # more check\n        match mode:\n            case 'train' | 'val':\n                # check mask cache\n                check_mask(root, img_list, config)\n                # check iqa cache\n                check_iqa(root, img_list, config)\n            case _:\n                # get max shape\n                self.max_size = get_max_size(root, img_list)\n                self.transform_fn = Resize(size=self.max_size)\n\n    def __len__(self) -> int:\n        return len(self.img_list)\n\n    def __getitem__(self, index: int) -> dict:\n        # choose get item method\n        match self.mode:\n            case 'train' | 'val':\n                return self.train_val_item(index)\n            case _:\n                return self.pred_item(index)\n\n    def train_val_item(self, index: int) -> dict:\n        # image name, like '028.png'\n        name = self.img_list[index]\n        logging.debug(f'train-val mode: loading item {name}')\n\n        # load infrared and visible\n        ir = gray_read(self.root / 'ir' / name)\n        vi, cbcr = ycbcr_read(self.root / 'vi' / name)\n\n        # load mask\n        mask = gray_read(self.root / 'mask' / name)\n\n        # load information measurement\n        ir_w = gray_read(self.root / 'iqa' / 'ir' / name)\n        vi_w = gray_read(self.root / 'iqa' / 'vi' / name)\n\n        # load label\n        label_p = Path(name).stem 
+ '.txt'\n        labels = label_read(self.root / 'labels' / label_p)\n\n        # concat images for transform(s)\n        t = torch.cat([ir, vi, mask, ir_w, vi_w, cbcr], dim=0)\n\n        # transform (resize)\n        resize_fn = Resize(size=self.config.train.image_size)\n        t = resize_fn(t)\n\n        # transform (flip up-down)\n        if random.random() < self.config.dataset.detect.flip_ud:\n            t = vflip(t)\n            if len(labels):\n                labels[:, 2] = 1 - labels[:, 2]\n\n        # transform (flip left-right)\n        if random.random() < self.config.dataset.detect.flip_lr:\n            t = hflip(t)\n            if len(labels):\n                labels[:, 1] = 1 - labels[:, 1]\n\n        # pad labels (cls, cx, cy, w, h) -> (img_idx, cls, cx, cy, w, h); img_idx is filled by collate_fn\n        labels_o = torch.zeros((len(labels), 6))\n        if len(labels):\n            labels_o[:, 1:] = labels\n\n        # unpack images\n        ir, vi, mask, ir_w, vi_w, cbcr = torch.split(t, [1, 1, 1, 1, 1, 2], dim=0)\n\n        # merge data\n        sample = {\n            'name': name,\n            'ir': ir, 'vi': vi,\n            'ir_w': ir_w, 'vi_w': vi_w, 'mask': mask, 'cbcr': cbcr,\n            'labels': labels_o\n        }\n\n        # return as expected\n        return sample\n\n    def pred_item(self, index: int) -> dict:\n        # image name, like '028.png'\n        name = self.img_list[index]\n        logging.debug(f'pred mode: loading item {name}')\n\n        # load infrared and visible\n        ir = gray_read(self.root / 'ir' / name)\n        vi, cbcr = ycbcr_read(self.root / 'vi' / name)\n\n        # transform (resize)\n        s = ir.shape[1:]\n        t = torch.cat([ir, vi, cbcr], dim=0)\n        ir, vi, cbcr = torch.split(self.transform_fn(t), [1, 1, 2], dim=0)\n\n        # merge data\n        sample = {'name': name, 'ir': ir, 'vi': vi, 'cbcr': cbcr, 'shape': s}\n\n        # return as expected\n        return sample\n\n    @staticmethod\n    def pred_save(fus: Tensor, names: List[str | Path], shape: List[Size], pred: Optional[Tensor] = None, save_txt: bool = False):\n        if pred is None:\n            return M3FD.pred_save_no_boxes(fus, names, shape)\n        return M3FD.pred_save_with_boxes(fus, names, shape, pred, save_txt)\n\n    @staticmethod\n    def pred_save_no_boxes(fus: Tensor, names: List[str | Path], shape: List[Size]):\n        for img_t, img_p, img_s in zip(fus, names, shape):\n            img_t = resize(img_t, img_s)\n            img_write(img_t, img_p)\n\n    @staticmethod\n    def pred_save_with_boxes(fus: Tensor, names: List[str | Path], shape: List[Size], pred: Tensor, save_txt: bool = False):\n        for img_t, img_p, img_s, pred_i in zip(fus, names, shape, pred):\n            # reshape target\n            cur_s = img_t.shape[1:]\n            scale_x, scale_y = cur_s[1] / img_s[1], cur_s[0] / img_s[0]\n            pred_i[:, :4] *= Tensor([scale_x, scale_y, scale_x, scale_y]).to(pred_i.device)\n            # reshape image\n            img_t = resize(img_t, img_s)\n            img = (img_t.clamp_(0, 1) * 255).to(torch.uint8)\n            # draw bounding box\n            pred_x = list(filter(lambda x: x[4] > 0.6, pred_i))\n            boxes = [x[:4] for x in pred_x]\n            cls_idx = [int(x[5].cpu().numpy()) for x in pred_x]\n            labels = [f'{M3FD.classes[cls]}: {x[4].cpu().numpy():.2f}' for cls, x in zip(cls_idx, pred_x)]\n            colors = [M3FD.palette[cls] for cls, x in zip(cls_idx, pred_x)]\n            if len(boxes):\n                img = draw_bounding_boxes(img, torch.stack(boxes, dim=0), labels, colors, width=2)\n            img = img.float() / 255\n            # save labeled images\n            img_p = Path(img_p.parent) / 'images' / img_p.name\n            img_write(img, img_p)\n            # save label txt\n            if save_txt:\n                txt_p = Path(str(img_p.parent).replace('images', 'labels')) / (img_p.stem + '.txt')\n                txt_p.unlink(missing_ok=True)\n                txt_p.touch()\n                pred_i[:, :4] /= Tensor([img_s[1], img_s[0], img_s[1], img_s[0]]).to(pred_i.device)\n                pred_i[:, :4] = box_convert(pred_i[:, :4], 'xyxy', 'cxcywh')\n                label_write(pred_i, txt_p)\n\n    @staticmethod\n    def collate_fn(data: List[dict]) -> dict:\n        # keys\n        keys = data[0].keys()\n        # merge\n        new_data = {}\n        for key in keys:\n            k_data = [d[key] for d in data]\n            match key:\n                case 'name' | 'shape':\n                    # (name, name)\n                    new_data[key] = k_data\n                case 'labels':\n                    # write the batch index into label column 0, then concat\n                    for i, lb in enumerate(k_data):\n                        lb[:, 0] = i\n                    new_data[key] = torch.cat(k_data, dim=0)\n                case _:\n                    # (img, img)\n                    new_data[key] = torch.stack(k_data, dim=0)\n        # return as expected\n        return new_data\n
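\n\n# usage sketch (illustrative; `config` is a loaded ConfigDict): collate_fn stacks\n# image tensors and concatenates label tensors across the batch, e.g.\n#   loader = torch.utils.data.DataLoader(\n#       M3FD('data/m3fd', mode='train', config=config),\n#       batch_size=16, collate_fn=M3FD.collate_fn,\n#   )\n"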
  },
  {
    "path": "loader/roadscene.py",
    "content": "import logging\nfrom pathlib import Path\nfrom typing import Literal, List\n\nimport torch\nfrom kornia.geometry import resize\nfrom torch import Tensor, Size\nfrom torch.utils.data import Dataset\nfrom torchvision.transforms import Resize\n\nfrom config import ConfigDict\nfrom loader.utils.checker import check_mask, check_image, check_iqa, get_max_size\nfrom loader.utils.reader import gray_read, ycbcr_read, img_write\n\n\nclass RoadScene(Dataset):\n    type = 'fuse'  # dataset type: 'fuse' or 'fuse & detect'\n    color = True  # dataset visible format: false -> 'gray' or true -> 'color'\n\n    def __init__(self, root: str | Path, mode: Literal['train', 'val', 'pred'], config: ConfigDict):\n        super().__init__()\n        root = Path(root)\n        self.root = root\n        self.mode = mode\n\n        # read corresponding list\n        img_list = Path(root / 'meta' / f'{mode}.txt').read_text().splitlines()\n        logging.info(f'load {len(img_list)} images from {root.name}')\n        self.img_list = img_list\n\n        # check images\n        check_image(root, img_list)\n\n        # more check\n        match mode:\n            case 'train' | 'val':\n                # check mask cache\n                check_mask(root, img_list, config)\n                # check iqa cache\n                check_iqa(root, img_list, config)\n            case _:\n                # get max shape\n                self.max_size = get_max_size(root, img_list)\n\n        # choose transform\n        match mode:\n            case 'train' | 'val':\n                self.transform_fn = Resize(size=config.train.image_size)\n            case _:\n                self.transform_fn = Resize(size=self.max_size)\n\n    def __len__(self) -> int:\n        return len(self.img_list)\n\n    def __getitem__(self, index: int) -> dict:\n        # choose get item method\n        match self.mode:\n            case 'train' | 'val':\n                return self.train_val_item(index)\n            case _:\n                return self.pred_item(index)\n\n    def train_val_item(self, index: int) -> dict:\n        # image name, like '003.png'\n        name = self.img_list[index]\n        logging.debug(f'train-val mode: loading item {name}')\n\n        # load infrared and visible\n        ir = gray_read(self.root / 'ir' / name)\n        vi, cbcr = ycbcr_read(self.root / 'vi' / name)\n\n        # load mask\n        mask = gray_read(self.root / 'mask' / name)\n\n        # load information measurement\n        ir_w = gray_read(self.root / 'iqa' / 'ir' / name)\n        vi_w = gray_read(self.root / 'iqa' / 'vi' / name)\n\n        # transform (resize)\n        t = torch.cat([ir, vi, mask, ir_w, vi_w, cbcr], dim=0)\n        ir, vi, mask, ir_w, vi_w, cbcr = torch.split(self.transform_fn(t), [1, 1, 1, 1, 1, 2], dim=0)\n\n        # merge data\n        sample = {'name': name, 'ir': ir, 'vi': vi, 'ir_w': ir_w, 'vi_w': vi_w, 'mask': mask, 'cbcr': cbcr}\n\n        # return as expected\n        return sample\n\n    def pred_item(self, index: int) -> dict:\n        # image name, like '003.png'\n        name = self.img_list[index]\n        logging.debug(f'pred mode: loading item {name}')\n\n        # load infrared and visible\n        ir = gray_read(self.root / 'ir' / name)\n        vi, cbcr = ycbcr_read(self.root / 'vi' / name)\n\n        # transform (resize)\n        s = ir.shape[1:]\n        t = torch.cat([ir, vi, cbcr], dim=0)\n        ir, vi, cbcr = torch.split(self.transform_fn(t), [1, 1, 2], dim=0)\n\n        # merge data\n        
sample = {'name': name, 'ir': ir, 'vi': vi, 'cbcr': cbcr, 'shape': s}\n\n        # return as expected\n        return sample\n\n    @staticmethod\n    def pred_save(fus: Tensor, names: List[str | Path], shape: List[Size]):\n        for img_t, img_p, img_s in zip(fus, names, shape):\n            img_t = resize(img_t, img_s)\n            img_write(img_t, img_p)\n\n    @staticmethod\n    def collate_fn(data: List[dict]) -> dict:\n        # keys\n        keys = data[0].keys()\n        # merge\n        new_data = {}\n        for key in keys:\n            k_data = [d[key] for d in data]\n            new_data[key] = k_data if isinstance(k_data[0], str) or isinstance(k_data[0], Size) else torch.stack(k_data)\n        # return as expected\n        return new_data\n"
  },
  {
    "path": "loader/tno.py",
    "content": "import logging\nfrom pathlib import Path\nfrom typing import Literal, List\n\nimport torch\nfrom kornia.geometry import resize\nfrom torch import Tensor, Size\nfrom torch.utils.data import Dataset\nfrom torchvision.transforms import Resize\n\nfrom config import ConfigDict\nfrom loader.utils.checker import check_mask, check_image, check_iqa, get_max_size\nfrom loader.utils.reader import gray_read, img_write\n\n\nclass TNO(Dataset):\n    type = 'fuse'  # dataset type: 'fuse' or 'fuse & detect'\n    color = False  # dataset visible format: false -> 'gray' or true -> 'color'\n\n    def __init__(self, root: str | Path, mode: Literal['train', 'val', 'pred'], config: ConfigDict):\n        super().__init__()\n        root = Path(root)\n        self.root = root\n        self.mode = mode\n\n        # read corresponding list\n        img_list = Path(root / 'meta' / f'{mode}.txt').read_text().splitlines()\n        logging.info(f'load {len(img_list)} images from {root.name}')\n        self.img_list = img_list\n\n        # check images\n        check_image(root, img_list)\n\n        # more check\n        match mode:\n            case 'train' | 'val':\n                # check mask cache\n                check_mask(root, img_list, config)\n                # check iqa cache\n                check_iqa(root, img_list, config)\n            case _:\n                # get max shape\n                self.max_size = get_max_size(root, img_list)\n\n        # choose transform\n        match mode:\n            case 'train' | 'val':\n                self.transform_fn = Resize(size=config.train.image_size)\n            case _:\n                self.transform_fn = Resize(size=self.max_size)\n\n    def __len__(self) -> int:\n        return len(self.img_list)\n\n    def __getitem__(self, index: int) -> dict:\n        # choose get item method\n        match self.mode:\n            case 'train' | 'val':\n                return self.train_val_item(index)\n            case _:\n                return self.pred_item(index)\n\n    def train_val_item(self, index: int) -> dict:\n        # image name, like '028.png'\n        name = self.img_list[index]\n        logging.debug(f'train-val mode: loading item {name}')\n\n        # load infrared and visible\n        ir = gray_read(self.root / 'ir' / name)\n        vi = gray_read(self.root / 'vi' / name)\n\n        # load mask\n        mask = gray_read(self.root / 'mask' / name)\n\n        # load information measurement\n        ir_w = gray_read(self.root / 'iqa' / 'ir' / name)\n        vi_w = gray_read(self.root / 'iqa' / 'vi' / name)\n\n        # transform (resize)\n        t = torch.cat([ir, vi, mask, ir_w, vi_w], dim=0)\n        ir, vi, mask, ir_w, vi_w = torch.chunk(self.transform_fn(t), chunks=5, dim=0)\n\n        # merge data\n        sample = {'name': name, 'ir': ir, 'vi': vi, 'ir_w': ir_w, 'vi_w': vi_w, 'mask': mask}\n\n        # return as expected\n        return sample\n\n    def pred_item(self, index: int) -> dict:\n        # image name, like '028.png'\n        name = self.img_list[index]\n        logging.debug(f'pred mode: loading item {name}')\n\n        # load infrared and visible\n        ir = gray_read(self.root / 'ir' / name)\n        vi = gray_read(self.root / 'vi' / name)\n\n        # transform (resize)\n        s = ir.shape[1:]\n        t = torch.cat([ir, vi], dim=0)\n        ir, vi = torch.chunk(self.transform_fn(t), chunks=2, dim=0)\n\n        # merge data\n        sample = {'name': name, 'ir': ir, 'vi': vi, 'shape': s}\n\n        # return as 
expected\n        return sample\n\n    @staticmethod\n    def pred_save(fus: Tensor, names: List[str | Path], shape: List[Size]):\n        for img_t, img_p, img_s in zip(fus, names, shape):\n            img_t = resize(img_t, img_s)\n            img_write(img_t, img_p)\n\n    @staticmethod\n    def collate_fn(data: List[dict]) -> dict:\n        # keys\n        keys = data[0].keys()\n        # merge\n        new_data = {}\n        for key in keys:\n            k_data = [d[key] for d in data]\n            new_data[key] = k_data if isinstance(k_data[0], (str, Size)) else torch.stack(k_data)\n        # return as expected\n        return new_data\n
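\n\n# usage sketch (comment only, not executed; assumes a ConfigDict `config`\n# exposing train.image_size, iqa.url and saliency.url, and a dataset rooted\n# at an assumed path 'data/tno'):\n#\n#   from torch.utils.data import DataLoader\n#   dataset = TNO(root='data/tno', mode='train', config=config)\n#   loader = DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=TNO.collate_fn)\n#   batch = next(iter(loader))  # dict with keys: name, ir, vi, ir_w, vi_w, mask\n"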
  },
  {
    "path": "loader/utils/__init__.py",
    "content": ""
  },
  {
    "path": "loader/utils/checker.py",
    "content": "import logging\nimport sys\nfrom pathlib import Path\nfrom typing import List\n\nfrom torch import Tensor, Size\nfrom tqdm import tqdm\n\nfrom config import ConfigDict\nfrom loader.utils.reader import label_read, gray_read\nfrom pipeline.iqa import IQA\nfrom pipeline.saliency import Saliency\n\n\ndef check_image(root: Path, img_list: List[str]):\n    assert (root / 'ir').exists() and (root / 'vi').exists(), f'ir and vi folders are required'\n    for img_name in img_list:\n        if not (root / 'ir' / img_name).exists() or not (root / 'vi' / img_name).exists():\n            logging.fatal(f'empty img {img_name} in {root.name}')\n            sys.exit(1)\n    logging.info('find all images on list')\n\n\ndef check_iqa(root: Path, img_list: List[str], config: ConfigDict):\n    iqa_cache = True\n    if (root / 'iqa').exists():\n        for img_name in img_list:\n            if not (root / 'iqa' / 'ir' / img_name).exists() or not (root / 'iqa' / 'vi' / img_name).exists():\n                iqa_cache = False\n                break\n    else:\n        iqa_cache = False\n    if iqa_cache:\n        logging.info(f'find iqa cache in folder, skip information measurement')\n    else:\n        logging.info(f'find no iqa cache in folder, start information measurement')\n        iqa = IQA(url=config.iqa.url)\n        iqa.inference(src=root, dst=root / 'iqa')\n\n\ndef check_labels(root: Path, img_list: List[str]) -> List[Tensor]:\n    assert (root / 'labels').exists(), f'labels folder is required'\n    labels = []\n    for img_name in img_list:\n        label_name = Path(img_name).stem + '.txt'\n        if not (root / 'labels' / label_name).exists():\n            logging.fatal(f'empty label {label_name} in {root.name}')\n            sys.exit(1)\n        labels.append(label_read(root / 'labels' / label_name))\n    logging.info('find all labels on list')\n    return labels\n\n\ndef check_mask(root: Path, img_list: List[str], config: ConfigDict):\n    mask_cache = True\n    if (root / 'mask').exists():\n        for img_name in img_list:\n            if not (root / 'mask' / img_name).exists():\n                mask_cache = False\n                break\n    else:\n        mask_cache = False\n    if mask_cache:\n        logging.info('find mask cache in folder, skip saliency detection')\n    else:\n        logging.info('find no mask cache in folder, start saliency detection')\n        saliency = Saliency(url=config.saliency.url)\n        saliency.inference(src=root / 'ir', dst=root / 'mask')\n\n\ndef get_max_size(root: Path, img_list: List[str]):\n    max_h, max_w = -1, -1\n    logging.info('find suitable size for prediction')\n    img_l = tqdm(img_list)\n    for img_name in img_l:\n        img_l.set_description('finding suitable size')\n        img = gray_read(root / 'ir' / img_name)\n        max_h = max(max_h, img.shape[1])\n        max_w = max(max_w, img.shape[2])\n    logging.info(f'max size in dataset: H:{max_h} x W:{max_w}')\n    return Size((max_h, max_w))\n"
  },
  {
    "path": "loader/utils/reader.py",
    "content": "from pathlib import Path\nfrom typing import Tuple\n\nimport cv2\nimport numpy\nimport torch\nfrom kornia import image_to_tensor, tensor_to_image\nfrom kornia.color import rgb_to_ycbcr, bgr_to_rgb, rgb_to_bgr\nfrom torch import Tensor\nfrom torchvision.ops import box_convert\n\n\ndef gray_read(img_path: str | Path) -> Tensor:\n    img_n = cv2.imread(str(img_path), cv2.IMREAD_GRAYSCALE)\n    img_t = image_to_tensor(img_n).float() / 255\n    return img_t\n\n\ndef ycbcr_read(img_path: str | Path) -> Tuple[Tensor, Tensor]:\n    img_n = cv2.imread(str(img_path), cv2.IMREAD_COLOR)\n    img_t = image_to_tensor(img_n).float() / 255\n    img_t = rgb_to_ycbcr(bgr_to_rgb(img_t))\n    y, cbcr = torch.split(img_t, [1, 2], dim=0)\n    return y, cbcr\n\n\ndef label_read(label_path: str | Path) -> Tensor:\n    target = numpy.loadtxt(str(label_path), dtype=numpy.float32)\n    labels = torch.from_numpy(target).view(-1, 5)  # (cls, cx, cy, w, h)\n    labels[:, 1:] = box_convert(labels[:, 1:], 'cxcywh', 'xyxy')  # (cls, x1, y1, x2, y2)\n    return labels\n\n\ndef img_write(img_t: Tensor, img_path: str | Path):\n    if img_t.shape[0] == 3:\n        img_t = rgb_to_bgr(img_t)\n    img_n = tensor_to_image(img_t.squeeze().cpu()) * 255\n    cv2.imwrite(str(img_path), img_n)\n\n\ndef label_write(pred_i: Tensor, txt_path: str | Path):\n    for *pos, conf, cls in pred_i.tolist():\n        line = (cls, *pos, conf)\n        with txt_path.open('a') as f:\n            f.write(('%g ' * len(line)).rstrip() % line + '\\n')\n"
  },
  {
    "path": "module/__init__.py",
    "content": ""
  },
  {
    "path": "module/detect/README.md",
    "content": "# Detect\n\nBased on YOLOv5.\n\nReference: [YOLOv5 official](https://github.com/ultralytics/yolov5)\n"
  },
  {
    "path": "module/detect/models/__init__.py",
    "content": ""
  },
  {
    "path": "module/detect/models/common.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nCommon modules\n\"\"\"\n\nimport json\nimport os\nimport platform\nimport sys\nimport warnings\nfrom collections import OrderedDict, namedtuple\nfrom copy import copy\nfrom pathlib import Path\n\nimport cv2\nimport math\nimport numpy as np\nimport pandas as pd\nimport requests\nimport torch\nimport torch.nn as nn\nimport yaml\nfrom PIL import Image\nfrom torch.cuda import amp\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nif platform.system() != 'Windows':\n    ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative\n\nfrom utils.dataloaders import exif_transpose, letterbox\nfrom utils.general import (LOGGER, check_requirements, check_suffix, check_version, colorstr, increment_path,\n                           make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh)\nfrom utils.plots import Annotator, colors, save_one_box\nfrom utils.torch_utils import copy_attr, time_sync\n\n\ndef autopad(k, p=None):  # kernel, padding\n    # Pad to 'same'\n    if p is None:\n        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad\n    return p\n\n\nclass Conv(nn.Module):\n    # Standard convolution\n    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups\n        super().__init__()\n        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)\n        self.bn = nn.BatchNorm2d(c2)\n        self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())\n\n    def forward(self, x):\n        return self.act(self.bn(self.conv(x)))\n\n    def forward_fuse(self, x):\n        return self.act(self.conv(x))\n\n\nclass DWConv(Conv):\n    # Depth-wise convolution class\n    def __init__(self, c1, c2, k=1, s=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups\n        super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act)\n\n\nclass DWConvTranspose2d(nn.ConvTranspose2d):\n    # Depth-wise transpose convolution class\n    def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0):  # ch_in, ch_out, kernel, stride, padding, padding_out\n        super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))\n\n\nclass TransformerLayer(nn.Module):\n    # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)\n    def __init__(self, c, num_heads):\n        super().__init__()\n        self.q = nn.Linear(c, c, bias=False)\n        self.k = nn.Linear(c, c, bias=False)\n        self.v = nn.Linear(c, c, bias=False)\n        self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)\n        self.fc1 = nn.Linear(c, c, bias=False)\n        self.fc2 = nn.Linear(c, c, bias=False)\n\n    def forward(self, x):\n        x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x\n        x = self.fc2(self.fc1(x)) + x\n        return x\n\n\nclass TransformerBlock(nn.Module):\n    # Vision Transformer https://arxiv.org/abs/2010.11929\n    def __init__(self, c1, c2, num_heads, num_layers):\n        super().__init__()\n        self.conv = None\n        if c1 != c2:\n            self.conv = Conv(c1, c2)\n        self.linear = nn.Linear(c2, c2)  # learnable position embedding\n        self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))\n        self.c2 = c2\n\n    def forward(self, x):\n        if self.conv is not 
None:\n            x = self.conv(x)\n        b, _, w, h = x.shape\n        p = x.flatten(2).permute(2, 0, 1)\n        return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)\n\n\nclass Bottleneck(nn.Module):\n    # Standard bottleneck\n    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = Conv(c1, c_, 1, 1)\n        self.cv2 = Conv(c_, c2, 3, 1, g=g)\n        self.add = shortcut and c1 == c2\n\n    def forward(self, x):\n        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))\n\n\nclass BottleneckCSP(nn.Module):\n    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = Conv(c1, c_, 1, 1)\n        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)\n        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)\n        self.cv4 = Conv(2 * c_, c2, 1, 1)\n        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)\n        self.act = nn.SiLU()\n        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))\n\n    def forward(self, x):\n        y1 = self.cv3(self.m(self.cv1(x)))\n        y2 = self.cv2(x)\n        return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))\n\n\nclass CrossConv(nn.Module):\n    # Cross Convolution Downsample\n    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):\n        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = Conv(c1, c_, (1, k), (1, s))\n        self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)\n        self.add = shortcut and c1 == c2\n\n    def forward(self, x):\n        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))\n\n\nclass C3(nn.Module):\n    # CSP Bottleneck with 3 convolutions\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = Conv(c1, c_, 1, 1)\n        self.cv2 = Conv(c1, c_, 1, 1)\n        self.cv3 = Conv(2 * c_, c2, 1)  # optional act=FReLU(c2)\n        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))\n\n    def forward(self, x):\n        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))\n\n\nclass C3x(C3):\n    # C3 module with cross-convolutions\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n        super().__init__(c1, c2, n, shortcut, g, e)\n        c_ = int(c2 * e)\n        self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))\n\n\nclass C3TR(C3):\n    # C3 module with TransformerBlock()\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n        super().__init__(c1, c2, n, shortcut, g, e)\n        c_ = int(c2 * e)\n        self.m = TransformerBlock(c_, c_, 4, n)\n\n\nclass C3SPP(C3):\n    # C3 module with SPP()\n    def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):\n        super().__init__(c1, c2, n, shortcut, g, e)\n        c_ = int(c2 * e)\n        self.m = SPP(c_, c_, k)\n\n\nclass C3Ghost(C3):\n    # C3 module with GhostBottleneck()\n    def 
__init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n        super().__init__(c1, c2, n, shortcut, g, e)\n        c_ = int(c2 * e)  # hidden channels\n        self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))\n\n\nclass SPP(nn.Module):\n    # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729\n    def __init__(self, c1, c2, k=(5, 9, 13)):\n        super().__init__()\n        c_ = c1 // 2  # hidden channels\n        self.cv1 = Conv(c1, c_, 1, 1)\n        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)\n        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])\n\n    def forward(self, x):\n        x = self.cv1(x)\n        with warnings.catch_warnings():\n            warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning\n            return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))\n\n\nclass SPPF(nn.Module):\n    # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher\n    def __init__(self, c1, c2, k=5):  # equivalent to SPP(k=(5, 9, 13))\n        super().__init__()\n        c_ = c1 // 2  # hidden channels\n        self.cv1 = Conv(c1, c_, 1, 1)\n        self.cv2 = Conv(c_ * 4, c2, 1, 1)\n        self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)\n\n    def forward(self, x):\n        x = self.cv1(x)\n        with warnings.catch_warnings():\n            warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning\n            y1 = self.m(x)\n            y2 = self.m(y1)\n            return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))\n\n\nclass Focus(nn.Module):\n    # Focus wh information into c-space\n    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups\n        super().__init__()\n        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)\n        # self.contract = Contract(gain=2)\n\n    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)\n        return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))\n        # return self.conv(self.contract(x))\n\n\nclass GhostConv(nn.Module):\n    # Ghost Convolution https://github.com/huawei-noah/ghostnet\n    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups\n        super().__init__()\n        c_ = c2 // 2  # hidden channels\n        self.cv1 = Conv(c1, c_, k, s, None, g, act)\n        self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)\n\n    def forward(self, x):\n        y = self.cv1(x)\n        return torch.cat((y, self.cv2(y)), 1)\n\n\nclass GhostBottleneck(nn.Module):\n    # Ghost Bottleneck https://github.com/huawei-noah/ghostnet\n    def __init__(self, c1, c2, k=3, s=1):  # ch_in, ch_out, kernel, stride\n        super().__init__()\n        c_ = c2 // 2\n        self.conv = nn.Sequential(\n            GhostConv(c1, c_, 1, 1),  # pw\n            DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw\n            GhostConv(c_, c2, 1, 1, act=False)\n        )  # pw-linear\n        self.shortcut = nn.Sequential(\n            DWConv(c1, c1, k, s, act=False), Conv(\n                c1, c2, 1, 1,\n                act=False\n                )\n            ) if s == 2 else nn.Identity()\n\n    def forward(self, x):\n        return self.conv(x) + self.shortcut(x)\n\n\nclass Contract(nn.Module):\n    # Contract width-height into channels, i.e. 
x(1,64,80,80) to x(1,256,40,40)\n    def __init__(self, gain=2):\n        super().__init__()\n        self.gain = gain\n\n    def forward(self, x):\n        b, c, h, w = x.size()  # assert (h / s == 0) and (W / s == 0), 'Indivisible gain'\n        s = self.gain\n        x = x.view(b, c, h // s, s, w // s, s)  # x(1,64,40,2,40,2)\n        x = x.permute(0, 3, 5, 1, 2, 4).contiguous()  # x(1,2,2,64,40,40)\n        return x.view(b, c * s * s, h // s, w // s)  # x(1,256,40,40)\n\n\nclass Expand(nn.Module):\n    # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)\n    def __init__(self, gain=2):\n        super().__init__()\n        self.gain = gain\n\n    def forward(self, x):\n        b, c, h, w = x.size()  # assert C / s ** 2 == 0, 'Indivisible gain'\n        s = self.gain\n        x = x.view(b, s, s, c // s ** 2, h, w)  # x(1,2,2,16,80,80)\n        x = x.permute(0, 3, 4, 1, 5, 2).contiguous()  # x(1,16,80,2,80,2)\n        return x.view(b, c // s ** 2, h * s, w * s)  # x(1,16,160,160)\n\n\nclass Concat(nn.Module):\n    # Concatenate a list of tensors along dimension\n    def __init__(self, dimension=1):\n        super().__init__()\n        self.d = dimension\n\n    def forward(self, x):\n        return torch.cat(x, self.d)\n\n\nclass DetectMultiBackend(nn.Module):\n    # YOLOv5 MultiBackend class for python inference on various backends\n    def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):\n        # Usage:\n        #   PyTorch:              weights = *.pt\n        #   TorchScript:                    *.torchscript\n        #   ONNX Runtime:                   *.onnx\n        #   ONNX OpenCV DNN:                *.onnx with --dnn\n        #   OpenVINO:                       *.xml\n        #   CoreML:                         *.mlmodel\n        #   TensorRT:                       *.engine\n        #   TensorFlow SavedModel:          *_saved_model\n        #   TensorFlow GraphDef:            *.pb\n        #   TensorFlow Lite:                *.tflite\n        #   TensorFlow Edge TPU:            *_edgetpu.tflite\n        from models.experimental import attempt_download, attempt_load  # scoped to avoid circular import\n\n        super().__init__()\n        w = str(weights[0] if isinstance(weights, list) else weights)\n        pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(w)  # get backend\n        w = attempt_download(w)  # download if not local\n        fp16 &= (pt or jit or onnx or engine) and device.type != 'cpu'  # FP16\n        stride, names = 32, [f'class{i}' for i in range(1000)]  # assign defaults\n        if data:  # assign class names (optional)\n            with open(data, errors='ignore') as f:\n                names = yaml.safe_load(f)['names']\n\n        if pt:  # PyTorch\n            model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)\n            stride = max(int(model.stride.max()), 32)  # model stride\n            names = model.module.names if hasattr(model, 'module') else model.names  # get class names\n            model.half() if fp16 else model.float()\n            self.model = model  # explicitly assign for to(), cpu(), cuda(), half()\n        elif jit:  # TorchScript\n            LOGGER.info(f'Loading {w} for TorchScript inference...')\n            extra_files = {'config.txt': ''}  # model metadata\n            model = torch.jit.load(w, _extra_files=extra_files)\n            model.half() if 
fp16 else model.float()\n            if extra_files['config.txt']:\n                d = json.loads(extra_files['config.txt'])  # extra_files dict\n                stride, names = int(d['stride']), d['names']\n        elif dnn:  # ONNX OpenCV DNN\n            LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n            check_requirements(('opencv-python>=4.5.4',))\n            net = cv2.dnn.readNetFromONNX(w)\n        elif onnx:  # ONNX Runtime\n            LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n            cuda = torch.cuda.is_available()\n            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n            import onnxruntime\n            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n            session = onnxruntime.InferenceSession(w, providers=providers)\n            meta = session.get_modelmeta().custom_metadata_map  # metadata\n            if 'stride' in meta:\n                stride, names = int(meta['stride']), eval(meta['names'])\n        elif xml:  # OpenVINO\n            LOGGER.info(f'Loading {w} for OpenVINO inference...')\n            check_requirements(('openvino',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/\n            from openvino.runtime import Core, Layout, get_batch\n            ie = Core()\n            if not Path(w).is_file():  # if not *.xml\n                w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir\n            network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))\n            if network.get_parameters()[0].get_layout().empty:\n                network.get_parameters()[0].set_layout(Layout(\"NCHW\"))\n            batch_dim = get_batch(network)\n            if batch_dim.is_static:\n                batch_size = batch_dim.get_length()\n            executable_network = ie.compile_model(network, device_name=\"CPU\")  # device_name=\"MYRIAD\" for Intel NCS2\n            output_layer = next(iter(executable_network.outputs))\n            meta = Path(w).with_suffix('.yaml')\n            if meta.exists():\n                stride, names = self._load_metadata(meta)  # load metadata\n        elif engine:  # TensorRT\n            LOGGER.info(f'Loading {w} for TensorRT inference...')\n            import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download\n            check_version(trt.__version__, '7.0.0', hard=True)  # require tensorrt>=7.0.0\n            Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n            logger = trt.Logger(trt.Logger.INFO)\n            with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n                model = runtime.deserialize_cuda_engine(f.read())\n            bindings = OrderedDict()\n            fp16 = False  # default updated below\n            for index in range(model.num_bindings):\n                name = model.get_binding_name(index)\n                dtype = trt.nptype(model.get_binding_dtype(index))\n                shape = tuple(model.get_binding_shape(index))\n                data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device)\n                bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr()))\n                if model.binding_is_input(index) and dtype == np.float16:\n                    fp16 = True\n            binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n            context = model.create_execution_context()\n            
batch_size = bindings['images'].shape[0]\n        elif coreml:  # CoreML\n            LOGGER.info(f'Loading {w} for CoreML inference...')\n            import coremltools as ct\n            model = ct.models.MLModel(w)\n        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)\n            if saved_model:  # SavedModel\n                LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')\n                import tensorflow as tf\n                keras = False  # assume TF1 saved_model\n                model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)\n            elif pb:  # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n                LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')\n                import tensorflow as tf\n\n                def wrap_frozen_graph(gd, inputs, outputs):\n                    x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), [])  # wrapped\n                    ge = x.graph.as_graph_element\n                    return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))\n\n                gd = tf.Graph().as_graph_def()  # graph_def\n                with open(w, 'rb') as f:\n                    gd.ParseFromString(f.read())\n                frozen_func = wrap_frozen_graph(gd, inputs=\"x:0\", outputs=\"Identity:0\")\n            elif tflite or edgetpu:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n                try:  # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu\n                    from tflite_runtime.interpreter import Interpreter, load_delegate\n                except ImportError:\n                    import tensorflow as tf\n                    Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,\n                if edgetpu:  # Edge TPU https://coral.ai/software/#edgetpu-runtime\n                    LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n                    delegate = {\n                        'Linux': 'libedgetpu.so.1',\n                        'Darwin': 'libedgetpu.1.dylib',\n                        'Windows': 'edgetpu.dll'}[platform.system()]\n                    interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])\n                else:  # Lite\n                    LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n                    interpreter = Interpreter(model_path=w)  # load TFLite model\n                interpreter.allocate_tensors()  # allocate\n                input_details = interpreter.get_input_details()  # inputs\n                output_details = interpreter.get_output_details()  # outputs\n            elif tfjs:\n                raise Exception('ERROR: YOLOv5 TF.js inference is not supported')\n            else:\n                raise Exception(f'ERROR: {w} is not a supported format')\n        self.__dict__.update(locals())  # assign all variables to self\n\n    def forward(self, im, augment=False, visualize=False, val=False):\n        # YOLOv5 MultiBackend inference\n        b, ch, h, w = im.shape  # batch, channel, height, width\n        if self.fp16 and im.dtype != torch.float16:\n            im = im.half()  # to FP16\n\n        if self.pt:  # PyTorch\n            y = self.model(im, augment=augment, visualize=visualize)[0]\n        elif self.jit:  # TorchScript\n            y = 
self.model(im)[0]\n        elif self.dnn:  # ONNX OpenCV DNN\n            im = im.cpu().numpy()  # torch to numpy\n            self.net.setInput(im)\n            y = self.net.forward()\n        elif self.onnx:  # ONNX Runtime\n            im = im.cpu().numpy()  # torch to numpy\n            y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]\n        elif self.xml:  # OpenVINO\n            im = im.cpu().numpy()  # FP32\n            y = self.executable_network([im])[self.output_layer]\n        elif self.engine:  # TensorRT\n            assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape)\n            self.binding_addrs['images'] = int(im.data_ptr())\n            self.context.execute_v2(list(self.binding_addrs.values()))\n            y = self.bindings['output'].data\n        elif self.coreml:  # CoreML\n            im = im.permute(0, 2, 3, 1).cpu().numpy()  # torch BCHW to numpy BHWC shape(1,320,192,3)\n            im = Image.fromarray((im[0] * 255).astype('uint8'))\n            # im = im.resize((192, 320), Image.ANTIALIAS)\n            y = self.model.predict({'image': im})  # coordinates are xywh normalized\n            if 'confidence' in y:\n                box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]])  # xyxy pixels\n                conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(float)  # np.float alias removed in modern NumPy\n                y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n            else:\n                k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1])  # output key\n                y = y[k]  # output\n        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)\n            im = im.permute(0, 2, 3, 1).cpu().numpy()  # torch BCHW to numpy BHWC shape(1,320,192,3)\n            if self.saved_model:  # SavedModel\n                y = (self.model(im, training=False) if self.keras else self.model(im)).numpy()\n            elif self.pb:  # GraphDef\n                y = self.frozen_func(x=self.tf.constant(im)).numpy()\n            else:  # Lite or Edge TPU\n                input, output = self.input_details[0], self.output_details[0]\n                int8 = input['dtype'] == np.uint8  # is TFLite quantized uint8 model\n                if int8:\n                    scale, zero_point = input['quantization']\n                    im = (im / scale + zero_point).astype(np.uint8)  # de-scale\n                self.interpreter.set_tensor(input['index'], im)\n                self.interpreter.invoke()\n                y = self.interpreter.get_tensor(output['index'])\n                if int8:\n                    scale, zero_point = output['quantization']\n                    y = (y.astype(np.float32) - zero_point) * scale  # re-scale\n            y[..., :4] *= [w, h, w, h]  # xywh normalized to pixels\n\n        if isinstance(y, np.ndarray):\n            y = torch.tensor(y, device=self.device)\n        return (y, []) if val else y\n\n    def warmup(self, imgsz=(1, 3, 640, 640)):\n        # Warmup model by running inference once\n        warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb\n        if any(warmup_types) and self.device.type != 'cpu':\n            im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input\n            for _ in range(2 if self.jit else 1):  #\n                self.forward(im)  # warmup\n\n    @staticmethod\n    def model_type(p='path/to/model.pt'):\n        # 
Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx\n        from export import export_formats\n        suffixes = list(export_formats().Suffix) + ['.xml']  # export suffixes\n        check_suffix(p, suffixes)  # checks\n        p = Path(p).name  # eliminate trailing separators\n        pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, xml2 = (s in p for s in suffixes)\n        xml |= xml2  # *_openvino_model or *.xml\n        tflite &= not edgetpu  # *.tflite\n        return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs\n\n    @staticmethod\n    def _load_metadata(f='path/to/meta.yaml'):\n        # Load metadata from meta.yaml if it exists\n        with open(f, errors='ignore') as f:\n            d = yaml.safe_load(f)\n        return d['stride'], d['names']  # assign stride, names\n\n\nclass AutoShape(nn.Module):\n    # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS\n    conf = 0.25  # NMS confidence threshold\n    iou = 0.45  # NMS IoU threshold\n    agnostic = False  # NMS class-agnostic\n    multi_label = False  # NMS multiple labels per box\n    classes = None  # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs\n    max_det = 1000  # maximum number of detections per image\n    amp = False  # Automatic Mixed Precision (AMP) inference\n\n    def __init__(self, model, verbose=True):\n        super().__init__()\n        if verbose:\n            LOGGER.info('Adding AutoShape... ')\n        copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=())  # copy attributes\n        self.dmb = isinstance(model, DetectMultiBackend)  # DetectMultiBackend() instance\n        self.pt = not self.dmb or model.pt  # PyTorch model\n        self.model = model.eval()\n\n    def _apply(self, fn):\n        # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers\n        self = super()._apply(fn)\n        if self.pt:\n            m = self.model.model.model[-1] if self.dmb else self.model.model[-1]  # Detect()\n            m.stride = fn(m.stride)\n            m.grid = list(map(fn, m.grid))\n            if isinstance(m.anchor_grid, list):\n                m.anchor_grid = list(map(fn, m.anchor_grid))\n        return self\n\n    @torch.no_grad()\n    def forward(self, imgs, size=640, augment=False, profile=False):\n        # Inference from various sources. For height=640, width=1280, RGB images example inputs are:\n        #   file:       imgs = 'data/images/zidane.jpg'  # str or PosixPath\n        #   URI:             = 'https://ultralytics.com/images/zidane.jpg'\n        #   OpenCV:          = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)\n        #   PIL:             = Image.open('image.jpg') or ImageGrab.grab()  # HWC x(640,1280,3)\n        #   numpy:           = np.zeros((640,1280,3))  # HWC\n        #   torch:           = torch.zeros(16,3,320,640)  # BCHW (scaled to size=640, 0-1 values)\n        #   multiple:        = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  
# list of images\n\n        t = [time_sync()]\n        p = next(self.model.parameters()) if self.pt else torch.zeros(1, device=self.model.device)  # for device, type\n        autocast = self.amp and (p.device.type != 'cpu')  # Automatic Mixed Precision (AMP) inference\n        if isinstance(imgs, torch.Tensor):  # torch\n            with amp.autocast(autocast):\n                return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference\n\n        # Pre-process\n        n, imgs = (len(imgs), list(imgs)) if isinstance(imgs, (list, tuple)) else (1, [imgs])  # number, list of images\n        shape0, shape1, files = [], [], []  # image and inference shapes, filenames\n        for i, im in enumerate(imgs):\n            f = f'image{i}'  # filename\n            if isinstance(im, (str, Path)):  # filename or uri\n                im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im\n                im = np.asarray(exif_transpose(im))\n            elif isinstance(im, Image.Image):  # PIL Image\n                im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f\n            files.append(Path(f).with_suffix('.jpg').name)\n            if im.shape[0] < 5:  # image in CHW\n                im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)\n            im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3)  # enforce 3ch input\n            s = im.shape[:2]  # HWC\n            shape0.append(s)  # image shape\n            g = (size / max(s))  # gain\n            shape1.append([y * g for y in s])\n            imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update\n        shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)]  # inf shape\n        x = [letterbox(im, shape1, auto=False)[0] for im in imgs]  # pad\n        x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2)))  # stack and BHWC to BCHW\n        x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32\n        t.append(time_sync())\n\n        with amp.autocast(autocast):\n            # Inference\n            y = self.model(x, augment, profile)  # forward\n            t.append(time_sync())\n\n            # Post-process\n            y = non_max_suppression(\n                y if self.dmb else y[0],\n                self.conf,\n                self.iou,\n                self.classes,\n                self.agnostic,\n                self.multi_label,\n                max_det=self.max_det\n                )  # NMS\n            for i in range(n):\n                scale_coords(shape1, y[i][:, :4], shape0[i])\n\n            t.append(time_sync())\n            return Detections(imgs, y, files, t, self.names, x.shape)\n\n\nclass Detections:\n    # YOLOv5 detections class for inference results\n    def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None):\n        super().__init__()\n        d = pred[0].device  # device\n        gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs]  # normalizations\n        self.imgs = imgs  # list of images as numpy arrays\n        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)\n        self.names = names  # class names\n        self.files = files  # image filenames\n        self.times = times  # profiling times\n        self.xyxy = pred  # xyxy pixels\n        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels\n        self.xyxyn = [x / g for x, 
g in zip(self.xyxy, gn)]  # xyxy normalized\n        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized\n        self.n = len(self.pred)  # number of images (batch size)\n        self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3))  # timestamps (ms)\n        self.s = shape  # inference BCHW shape\n\n    def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')):\n        crops = []\n        for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):\n            s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '  # string\n            if pred.shape[0]:\n                for c in pred[:, -1].unique():\n                    n = (pred[:, -1] == c).sum()  # detections per class\n                    s += f\"{n} {self.names[int(c)]}{'s' * (n > 1)}, \"  # add to string\n                if show or save or render or crop:\n                    annotator = Annotator(im, example=str(self.names))\n                    for *box, conf, cls in reversed(pred):  # xyxy, confidence, class\n                        label = f'{self.names[int(cls)]} {conf:.2f}'\n                        if crop:\n                            file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None\n                            crops.append(\n                                {\n                                    'box': box,\n                                    'conf': conf,\n                                    'cls': cls,\n                                    'label': label,\n                                    'im': save_one_box(box, im, file=file, save=save)}\n                            )\n                        else:  # all others\n                            annotator.box_label(box, label if labels else '', color=colors(cls))\n                    im = annotator.im\n            else:\n                s += '(no detections)'\n\n            im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im  # from np\n            if pprint:\n                print(s.rstrip(', '))\n            if show:\n                im.show(self.files[i])  # show\n            if save:\n                f = self.files[i]\n                im.save(save_dir / f)  # save\n                if i == self.n - 1:\n                    LOGGER.info(f\"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}\")\n            if render:\n                self.imgs[i] = np.asarray(im)\n        if crop:\n            if save:\n                LOGGER.info(f'Saved results to {save_dir}\\n')\n            return crops\n\n    def print(self):\n        self.display(pprint=True)  # print results\n        print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t)\n\n    def show(self, labels=True):\n        self.display(show=True, labels=labels)  # show results\n\n    def save(self, labels=True, save_dir='runs/detect/exp'):\n        save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True)  # increment save_dir\n        self.display(save=True, labels=labels, save_dir=save_dir)  # save results\n\n    def crop(self, save=True, save_dir='runs/detect/exp'):\n        save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None\n        return self.display(crop=True, save=save, save_dir=save_dir)  # crop results\n\n    def render(self, labels=True):\n        self.display(render=True, 
labels=labels)  # render results\n        return self.imgs\n\n    def pandas(self):\n        # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])\n        new = copy(self)  # return copy\n        ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name'  # xyxy columns\n        cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name'  # xywh columns\n        for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):\n            a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)]  # update\n            setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])\n        return new\n\n    def tolist(self):\n        # return a list of Detections objects, i.e. 'for result in results.tolist():'\n        r = range(self.n)  # iterable\n        x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]\n        # for d in x:\n        #    for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:\n        #        setattr(d, k, getattr(d, k)[0])  # pop out of list\n        return x\n\n    def __len__(self):\n        return self.n  # override len(results)\n\n    def __str__(self):\n        self.print()  # override print(results)\n        return ''\n\n\nclass Classify(nn.Module):\n    # Classification head, i.e. x(b,c1,20,20) to x(b,c2)\n    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, kernel, stride, padding, groups\n        super().__init__()\n        self.aap = nn.AdaptiveAvgPool2d(1)  # to x(b,c1,1,1)\n        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)  # to x(b,c2,1,1)\n        self.flat = nn.Flatten()\n\n    def forward(self, x):\n        z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1)  # cat if list\n        return self.flat(self.conv(z))  # flatten to x(b,c2)\n"
  },
  {
    "path": "module/detect/models/experimental.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nExperimental modules\n\"\"\"\nimport math\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom models.common import Conv\nfrom utils.downloads import attempt_download\n\n\nclass Sum(nn.Module):\n    # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070\n    def __init__(self, n, weight=False):  # n: number of inputs\n        super().__init__()\n        self.weight = weight  # apply weights boolean\n        self.iter = range(n - 1)  # iter object\n        if weight:\n            self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)  # layer weights\n\n    def forward(self, x):\n        y = x[0]  # no weight\n        if self.weight:\n            w = torch.sigmoid(self.w) * 2\n            for i in self.iter:\n                y = y + x[i + 1] * w[i]\n        else:\n            for i in self.iter:\n                y = y + x[i + 1]\n        return y\n\n\nclass MixConv2d(nn.Module):\n    # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595\n    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch_out, kernel, stride, ch_strategy\n        super().__init__()\n        n = len(k)  # number of convolutions\n        if equal_ch:  # equal c_ per group\n            i = torch.linspace(0, n - 1E-6, c2).floor()  # c2 indices\n            c_ = [(i == g).sum() for g in range(n)]  # intermediate channels\n        else:  # equal weight.numel() per group\n            b = [c2] + [0] * n\n            a = np.eye(n + 1, n, k=-1)\n            a -= np.roll(a, 1, axis=1)\n            a *= np.array(k) ** 2\n            a[0] = 1\n            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b\n\n        self.m = nn.ModuleList([\n            nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])\n        self.bn = nn.BatchNorm2d(c2)\n        self.act = nn.SiLU()\n\n    def forward(self, x):\n        return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))\n\n\nclass Ensemble(nn.ModuleList):\n    # Ensemble of models\n    def __init__(self):\n        super().__init__()\n\n    def forward(self, x, augment=False, profile=False, visualize=False):\n        y = [module(x, augment, profile, visualize)[0] for module in self]\n        # y = torch.stack(y).max(0)[0]  # max ensemble\n        # y = torch.stack(y).mean(0)  # mean ensemble\n        y = torch.cat(y, 1)  # nms ensemble\n        return y, None  # inference, train output\n\n\ndef attempt_load(weights, device=None, inplace=True, fuse=True):\n    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n    from models.yolo import Detect, Model\n\n    model = Ensemble()\n    for w in weights if isinstance(weights, list) else [weights]:\n        ckpt = torch.load(attempt_download(w), map_location='cpu')  # load\n        ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model\n        model.append(ckpt.fuse().eval() if fuse else ckpt.eval())  # fused or un-fused model in eval mode\n\n    # Compatibility updates\n    for m in model.modules():\n        t = type(m)\n        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):\n            m.inplace = inplace  # torch 1.7.0 compatibility\n            if t is Detect and not isinstance(m.anchor_grid, list):\n                delattr(m, 'anchor_grid')\n                setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)\n        elif 
t is Conv:\n            m._non_persistent_buffers_set = set()  # torch 1.6.0 compatibility\n        elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):\n            m.recompute_scale_factor = None  # torch 1.11.0 compatibility\n\n    if len(model) == 1:\n        return model[-1]  # return model\n    print(f'Ensemble created with {weights}\\n')\n    for k in 'names', 'nc', 'yaml':\n        setattr(model, k, getattr(model[0], k))\n    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride\n    assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'\n    return model  # return ensemble\n"
  },
  {
    "path": "module/detect/models/hub/anchors.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n# Default anchors for COCO data\n\n\n# P5 -------------------------------------------------------------------------------------------------------------------\n# P5-640:\nanchors_p5_640:\n  - [10,13, 16,30, 33,23]  # P3/8\n  - [30,61, 62,45, 59,119]  # P4/16\n  - [116,90, 156,198, 373,326]  # P5/32\n\n\n# P6 -------------------------------------------------------------------------------------------------------------------\n# P6-640:  thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11,  21,19,  17,41,  43,32,  39,70,  86,64,  65,131,  134,130,  120,265,  282,180,  247,354,  512,387\nanchors_p6_640:\n  - [9,11,  21,19,  17,41]  # P3/8\n  - [43,32,  39,70,  86,64]  # P4/16\n  - [65,131,  134,130,  120,265]  # P5/32\n  - [282,180,  247,354,  512,387]  # P6/64\n\n# P6-1280:  thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27,  44,40,  38,94,  96,68,  86,152,  180,137,  140,301,  303,264,  238,542,  436,615,  739,380,  925,792\nanchors_p6_1280:\n  - [19,27,  44,40,  38,94]  # P3/8\n  - [96,68,  86,152,  180,137]  # P4/16\n  - [140,301,  303,264,  238,542]  # P5/32\n  - [436,615,  739,380,  925,792]  # P6/64\n\n# P6-1920:  thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41,  67,59,  57,141,  144,103,  129,227,  270,205,  209,452,  455,396,  358,812,  653,922,  1109,570,  1387,1187\nanchors_p6_1920:\n  - [28,41,  67,59,  57,141]  # P3/8\n  - [144,103,  129,227,  270,205]  # P4/16\n  - [209,452,  455,396,  358,812]  # P5/32\n  - [653,922,  1109,570,  1387,1187]  # P6/64\n\n\n# P7 -------------------------------------------------------------------------------------------------------------------\n# P7-640:  thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11,  13,30,  29,20,  30,46,  61,38,  39,92,  78,80,  146,66,  79,163,  149,150,  321,143,  157,303,  257,402,  359,290,  524,372\nanchors_p7_640:\n  - [11,11,  13,30,  29,20]  # P3/8\n  - [30,46,  61,38,  39,92]  # P4/16\n  - [78,80,  146,66,  79,163]  # P5/32\n  - [149,150,  321,143,  157,303]  # P6/64\n  - [257,402,  359,290,  524,372]  # P7/128\n\n# P7-1280:  thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22,  54,36,  32,77,  70,83,  138,71,  75,173,  165,159,  148,334,  375,151,  334,317,  251,626,  499,474,  750,326,  534,814,  1079,818\nanchors_p7_1280:\n  - [19,22,  54,36,  32,77]  # P3/8\n  - [70,83,  138,71,  75,173]  # P4/16\n  - [165,159,  148,334,  375,151]  # P5/32\n  - [334,317,  251,626,  499,474]  # P6/64\n  - [750,326,  534,814,  1079,818]  # P7/128\n\n# P7-1920:  thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34,  81,55,  47,115,  105,124,  207,107,  113,259,  247,238,  222,500,  563,227,  501,476,  376,939,  749,711,  1126,489,  801,1222,  1618,1227\nanchors_p7_1920:\n  - [29,34,  81,55,  47,115]  # P3/8\n  - [105,124,  207,107,  113,259]  # P4/16\n  - [247,238,  222,500,  563,227]  # P5/32\n  - [501,476,  376,939,  749,711]  # P6/64\n  - [1126,489,  801,1222,  1618,1227]  # P7/128\n"
  },
  {
    "path": "module/detect/models/hub/yolov3-spp.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth multiple\nwidth_multiple: 1.0  # layer channel multiple\nanchors:\n  - [10,13, 16,30, 33,23]  # P3/8\n  - [30,61, 62,45, 59,119]  # P4/16\n  - [116,90, 156,198, 373,326]  # P5/32\n\n# darknet53 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [32, 3, 1]],  # 0\n   [-1, 1, Conv, [64, 3, 2]],  # 1-P1/2\n   [-1, 1, Bottleneck, [64]],\n   [-1, 1, Conv, [128, 3, 2]],  # 3-P2/4\n   [-1, 2, Bottleneck, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 5-P3/8\n   [-1, 8, Bottleneck, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 7-P4/16\n   [-1, 8, Bottleneck, [512]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 9-P5/32\n   [-1, 4, Bottleneck, [1024]],  # 10\n  ]\n\n# YOLOv3-SPP head\nhead:\n  [[-1, 1, Bottleneck, [1024, False]],\n   [-1, 1, SPP, [512, [5, 9, 13]]],\n   [-1, 1, Conv, [1024, 3, 1]],\n   [-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, Conv, [1024, 3, 1]],  # 15 (P5/32-large)\n\n   [-2, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 8], 1, Concat, [1]],  # cat backbone P4\n   [-1, 1, Bottleneck, [512, False]],\n   [-1, 1, Bottleneck, [512, False]],\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, Conv, [512, 3, 1]],  # 22 (P4/16-medium)\n\n   [-2, 1, Conv, [128, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P3\n   [-1, 1, Bottleneck, [256, False]],\n   [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)\n\n   [[27, 22, 15], 1, Detect, [nc, anchors]],   # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "module/detect/models/hub/yolov3-tiny.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth multiple\nwidth_multiple: 1.0  # layer channel multiple\nanchors:\n  - [10,14, 23,27, 37,58]  # P4/16\n  - [81,82, 135,169, 344,319]  # P5/32\n\n# YOLOv3-tiny backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [16, 3, 1]],  # 0\n   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 1-P1/2\n   [-1, 1, Conv, [32, 3, 1]],\n   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 3-P2/4\n   [-1, 1, Conv, [64, 3, 1]],\n   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 5-P3/8\n   [-1, 1, Conv, [128, 3, 1]],\n   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 7-P4/16\n   [-1, 1, Conv, [256, 3, 1]],\n   [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 9-P5/32\n   [-1, 1, Conv, [512, 3, 1]],\n   [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]],  # 11\n   [-1, 1, nn.MaxPool2d, [2, 1, 0]],  # 12\n  ]\n\n# YOLOv3-tiny head\nhead:\n  [[-1, 1, Conv, [1024, 3, 1]],\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, Conv, [512, 3, 1]],  # 15 (P5/32-large)\n\n   [-2, 1, Conv, [128, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 8], 1, Concat, [1]],  # cat backbone P4\n   [-1, 1, Conv, [256, 3, 1]],  # 19 (P4/16-medium)\n\n   [[19, 15], 1, Detect, [nc, anchors]],  # Detect(P4, P5)\n  ]\n"
  },
  {
    "path": "module/detect/models/hub/yolov3.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth multiple\nwidth_multiple: 1.0  # layer channel multiple\nanchors:\n  - [10,13, 16,30, 33,23]  # P3/8\n  - [30,61, 62,45, 59,119]  # P4/16\n  - [116,90, 156,198, 373,326]  # P5/32\n\n# darknet53 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [32, 3, 1]],  # 0\n   [-1, 1, Conv, [64, 3, 2]],  # 1-P1/2\n   [-1, 1, Bottleneck, [64]],\n   [-1, 1, Conv, [128, 3, 2]],  # 3-P2/4\n   [-1, 2, Bottleneck, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 5-P3/8\n   [-1, 8, Bottleneck, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 7-P4/16\n   [-1, 8, Bottleneck, [512]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 9-P5/32\n   [-1, 4, Bottleneck, [1024]],  # 10\n  ]\n\n# YOLOv3 head\nhead:\n  [[-1, 1, Bottleneck, [1024, False]],\n   [-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, Conv, [1024, 3, 1]],\n   [-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, Conv, [1024, 3, 1]],  # 15 (P5/32-large)\n\n   [-2, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 8], 1, Concat, [1]],  # cat backbone P4\n   [-1, 1, Bottleneck, [512, False]],\n   [-1, 1, Bottleneck, [512, False]],\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, Conv, [512, 3, 1]],  # 22 (P4/16-medium)\n\n   [-2, 1, Conv, [128, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P3\n   [-1, 1, Bottleneck, [256, False]],\n   [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)\n\n   [[27, 22, 15], 1, Detect, [nc, anchors]],   # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "module/detect/models/hub/yolov5-bifpn.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth multiple\nwidth_multiple: 1.0  # layer channel multiple\nanchors:\n  - [10,13, 16,30, 33,23]  # P3/8\n  - [30,61, 62,45, 59,119]  # P4/16\n  - [116,90, 156,198, 373,326]  # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 9\n  ]\n\n# YOLOv5 v6.0 BiFPN head\nhead:\n  [[-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 13\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 14, 6], 1, Concat, [1]],  # cat P4 <--- BiFPN change\n   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 10], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)\n\n   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "module/detect/models/hub/yolov5-fpn.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth multiple\nwidth_multiple: 1.0  # layer channel multiple\nanchors:\n  - [10,13, 16,30, 33,23]  # P3/8\n  - [30,61, 62,45, 59,119]  # P4/16\n  - [116,90, 156,198, 373,326]  # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 9\n  ]\n\n# YOLOv5 v6.0 FPN head\nhead:\n  [[-1, 3, C3, [1024, False]],  # 10 (P5/32-large)\n\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 1, Conv, [512, 1, 1]],\n   [-1, 3, C3, [512, False]],  # 14 (P4/16-medium)\n\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 3, C3, [256, False]],  # 18 (P3/8-small)\n\n   [[18, 14, 10], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)\n  ]\n"
  },
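  {
    "path": "docs/examples/from_index_routing.py",
    "content": "# Editor's illustrative sketch: a hypothetical helper file, not part of the original repo.\n# Mimics how Model._forward_once() in models/yolo.py (further below) resolves the\n# `from` column of these YAML rows: -1 takes the previous layer's output, any other\n# index takes a saved earlier output, and a list gathers several inputs for Concat\n# or Detect. Strings stand in for tensors to keep the sketch dependency-free.\n\n\ndef route(f, prev, saved):\n    if f == -1:\n        return prev\n    if isinstance(f, int):\n        return saved[f]\n    return [prev if j == -1 else saved[j] for j in f]\n\n\nsaved = ['out0', None, None, None, 'out4', None, 'out6']  # hypothetical savelist\nprint(route(-1, 'prev', saved))  # -> 'prev'\nprint(route(6, 'prev', saved))  # -> 'out6'\nprint(route([-1, 6], 'prev', saved))  # -> ['prev', 'out6'], e.g. a cat backbone P4 row\n"
  },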
  {
    "path": "module/detect/models/hub/yolov5-p2.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth multiple\nwidth_multiple: 1.0  # layer channel multiple\nanchors: 3  # AutoAnchor evolves 3 anchors per P output layer\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 9\n  ]\n\n# YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs\nhead:\n  [[-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 13\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)\n\n   [-1, 1, Conv, [128, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 2], 1, Concat, [1]],  # cat backbone P2\n   [-1, 1, C3, [128, False]],  # 21 (P2/4-xsmall)\n\n   [-1, 1, Conv, [128, 3, 2]],\n   [[-1, 18], 1, Concat, [1]],  # cat head P3\n   [-1, 3, C3, [256, False]],  # 24 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 14], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 27 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 10], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [1024, False]],  # 30 (P5/32-large)\n\n   [[21, 24, 27, 30], 1, Detect, [nc, anchors]],  # Detect(P2, P3, P4, P5)\n  ]\n"
  },
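  {
    "path": "docs/examples/autoanchor_placeholders.py",
    "content": "# Editor's illustrative sketch: a hypothetical helper file, not part of the original repo.\n# With `anchors: 3` the YAML above only declares how many anchors each output layer\n# gets; AutoAnchor evolves the actual (w, h) pairs at training time. parse_model()\n# in models/yolo.py expands the integer into placeholders of the right shape:\n\nanchors = 3\ndetect_from = [21, 24, 27, 30]  # the Detect() inputs of the P2 head above\n\nif isinstance(anchors, int):  # mirrors args[1] = [list(range(args[1] * 2))] * len(f)\n    anchors = [list(range(anchors * 2))] * len(detect_from)\n\nprint(anchors)  # four levels x three (w, h) placeholder pairs: [[0, 1, 2, 3, 4, 5], ...]\n"
  },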
  {
    "path": "module/detect/models/hub/yolov5-p34.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.33  # model depth multiple\nwidth_multiple: 0.50  # layer channel multiple\nanchors: 3  # AutoAnchor evolves 3 anchors per P output layer\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ],  # 0-P1/2\n    [ -1, 1, Conv, [ 128, 3, 2 ] ],  # 1-P2/4\n    [ -1, 3, C3, [ 128 ] ],\n    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 3-P3/8\n    [ -1, 6, C3, [ 256 ] ],\n    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 5-P4/16\n    [ -1, 9, C3, [ 512 ] ],\n    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 7-P5/32\n    [ -1, 3, C3, [ 1024 ] ],\n    [ -1, 1, SPPF, [ 1024, 5 ] ],  # 9\n  ]\n\n# YOLOv5 v6.0 head with (P3, P4) outputs\nhead:\n  [ [ -1, 1, Conv, [ 512, 1, 1 ] ],\n    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],\n    [ [ -1, 6 ], 1, Concat, [ 1 ] ],  # cat backbone P4\n    [ -1, 3, C3, [ 512, False ] ],  # 13\n\n    [ -1, 1, Conv, [ 256, 1, 1 ] ],\n    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],\n    [ [ -1, 4 ], 1, Concat, [ 1 ] ],  # cat backbone P3\n    [ -1, 3, C3, [ 256, False ] ],  # 17 (P3/8-small)\n\n    [ -1, 1, Conv, [ 256, 3, 2 ] ],\n    [ [ -1, 14 ], 1, Concat, [ 1 ] ],  # cat head P4\n    [ -1, 3, C3, [ 512, False ] ],  # 20 (P4/16-medium)\n\n    [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4)\n  ]\n"
  },
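  {
    "path": "docs/examples/depth_width_multiples.py",
    "content": "# Editor's illustrative sketch: a hypothetical helper file, not part of the original repo.\n# Shows the effect of the two multiples in the config above: repeat counts are scaled\n# by depth_multiple (minimum 1) and channel counts by width_multiple, rounded up to a\n# multiple of 8. make_divisible() is re-implemented here in the spirit of\n# utils.general.make_divisible.\n\nimport math\n\n\ndef make_divisible(x, divisor=8):\n    return math.ceil(x / divisor) * divisor\n\n\ngd, gw = 0.33, 0.50  # yolov5-p34.yaml multiples (s-sized model)\n\nfor n in (3, 6, 9):  # nominal C3 repeats in the backbone\n    print(f'repeats {n} -> {max(round(n * gd), 1)}')  # 3->1, 6->2, 9->3\nfor c in (64, 128, 256, 512, 1024):  # nominal channels\n    print(f'channels {c} -> {make_divisible(c * gw)}')  # e.g. 1024 -> 512\n"
  },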
  {
    "path": "module/detect/models/hub/yolov5-p6.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth multiple\nwidth_multiple: 1.0  # layer channel multiple\nanchors: 3  # AutoAnchor evolves 3 anchors per P output layer\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [768]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 11\n  ]\n\n# YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs\nhead:\n  [[-1, 1, Conv, [768, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 8], 1, Concat, [1]],  # cat backbone P5\n   [-1, 3, C3, [768, False]],  # 15\n\n   [-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 19\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 20], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 16], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)\n\n   [-1, 1, Conv, [768, 3, 2]],\n   [[-1, 12], 1, Concat, [1]],  # cat head P6\n   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)\n\n   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)\n  ]\n"
  },
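  {
    "path": "docs/examples/pyramid_strides.py",
    "content": "# Editor's illustrative sketch: a hypothetical helper file, not part of the original repo.\n# The Pk/stride comments in these configs mean output level Pk has stride 2**k, so the\n# prediction grid is imgsz // stride cells per side at a given image size.\n\nimgsz = 640\nfor k in (3, 4, 5, 6):  # P3 .. P6, as in yolov5-p6.yaml above\n    s = 2 ** k\n    print(f'P{k}/{s}: {imgsz // s}x{imgsz // s} grid')\n# P3/8: 80x80, P4/16: 40x40, P5/32: 20x20, P6/64: 10x10\n"
  },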
  {
    "path": "module/detect/models/hub/yolov5-p7.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth multiple\nwidth_multiple: 1.0  # layer channel multiple\nanchors: 3  # AutoAnchor evolves 3 anchors per P output layer\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [768]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64\n   [-1, 3, C3, [1024]],\n   [-1, 1, Conv, [1280, 3, 2]],  # 11-P7/128\n   [-1, 3, C3, [1280]],\n   [-1, 1, SPPF, [1280, 5]],  # 13\n  ]\n\n# YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs\nhead:\n  [[-1, 1, Conv, [1024, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 10], 1, Concat, [1]],  # cat backbone P6\n   [-1, 3, C3, [1024, False]],  # 17\n\n   [-1, 1, Conv, [768, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 8], 1, Concat, [1]],  # cat backbone P5\n   [-1, 3, C3, [768, False]],  # 21\n\n   [-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 25\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 29 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 26], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 32 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 22], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [768, False]],  # 35 (P5/32-large)\n\n   [-1, 1, Conv, [768, 3, 2]],\n   [[-1, 18], 1, Concat, [1]],  # cat head P6\n   [-1, 3, C3, [1024, False]],  # 38 (P6/64-xlarge)\n\n   [-1, 1, Conv, [1024, 3, 2]],\n   [[-1, 14], 1, Concat, [1]],  # cat head P7\n   [-1, 3, C3, [1280, False]],  # 41 (P7/128-xxlarge)\n\n   [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6, P7)\n  ]\n"
  },
  {
    "path": "module/detect/models/hub/yolov5-panet.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth multiple\nwidth_multiple: 1.0  # layer channel multiple\nanchors:\n  - [10,13, 16,30, 33,23]  # P3/8\n  - [30,61, 62,45, 59,119]  # P4/16\n  - [116,90, 156,198, 373,326]  # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 9\n  ]\n\n# YOLOv5 v6.0 PANet head\nhead:\n  [[-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 13\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 14], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 10], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)\n\n   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)\n  ]\n"
  },
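  {
    "path": "docs/examples/prediction_count.py",
    "content": "# Editor's illustrative sketch: a hypothetical helper file, not part of the original repo.\n# For the three-level PANet head above, the flattened inference tensor has shape\n# (batch, N, nc + 5), where N stacks every anchor at every grid cell. This computes N\n# for a 640x640 input, matching the 25200 rows YOLOv5 emits at that size.\n\nimgsz, na = 640, 3\nstrides = (8, 16, 32)  # P3, P4, P5\n\nn = sum(na * (imgsz // s) ** 2 for s in strides)\nprint(n)  # 3 * (80*80 + 40*40 + 20*20) = 25200\n"
  },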
  {
    "path": "module/detect/models/hub/yolov5l6.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth multiple\nwidth_multiple: 1.0  # layer channel multiple\nanchors:\n  - [19,27,  44,40,  38,94]  # P3/8\n  - [96,68,  86,152,  180,137]  # P4/16\n  - [140,301,  303,264,  238,542]  # P5/32\n  - [436,615,  739,380,  925,792]  # P6/64\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [768]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 11\n  ]\n\n# YOLOv5 v6.0 head\nhead:\n  [[-1, 1, Conv, [768, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 8], 1, Concat, [1]],  # cat backbone P5\n   [-1, 3, C3, [768, False]],  # 15\n\n   [-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 19\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 20], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 16], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)\n\n   [-1, 1, Conv, [768, 3, 2]],\n   [[-1, 12], 1, Concat, [1]],  # cat head P6\n   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)\n\n   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)\n  ]\n"
  },
  {
    "path": "module/detect/models/hub/yolov5m6.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.67  # model depth multiple\nwidth_multiple: 0.75  # layer channel multiple\nanchors:\n  - [19,27,  44,40,  38,94]  # P3/8\n  - [96,68,  86,152,  180,137]  # P4/16\n  - [140,301,  303,264,  238,542]  # P5/32\n  - [436,615,  739,380,  925,792]  # P6/64\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [768]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 11\n  ]\n\n# YOLOv5 v6.0 head\nhead:\n  [[-1, 1, Conv, [768, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 8], 1, Concat, [1]],  # cat backbone P5\n   [-1, 3, C3, [768, False]],  # 15\n\n   [-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 19\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 20], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 16], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)\n\n   [-1, 1, Conv, [768, 3, 2]],\n   [[-1, 12], 1, Concat, [1]],  # cat head P6\n   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)\n\n   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)\n  ]\n"
  },
  {
    "path": "module/detect/models/hub/yolov5n6.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.33  # model depth multiple\nwidth_multiple: 0.25  # layer channel multiple\nanchors:\n  - [19,27,  44,40,  38,94]  # P3/8\n  - [96,68,  86,152,  180,137]  # P4/16\n  - [140,301,  303,264,  238,542]  # P5/32\n  - [436,615,  739,380,  925,792]  # P6/64\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [768]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 11\n  ]\n\n# YOLOv5 v6.0 head\nhead:\n  [[-1, 1, Conv, [768, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 8], 1, Concat, [1]],  # cat backbone P5\n   [-1, 3, C3, [768, False]],  # 15\n\n   [-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 19\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 20], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 16], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)\n\n   [-1, 1, Conv, [768, 3, 2]],\n   [[-1, 12], 1, Concat, [1]],  # cat head P6\n   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)\n\n   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)\n  ]\n"
  },
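  {
    "path": "docs/examples/variant_scaling.py",
    "content": "# Editor's illustrative sketch: a hypothetical helper file, not part of the original repo.\n# yolov5n6 / m6 / l6 above (and s6, x6 below) share one architecture and differ only in\n# the two multiples at the top of each file. This prints the effective C3 repeat counts\n# and the widest channel width per variant, using the same scaling rules as parse_model().\n\nimport math\n\n\ndef make_divisible(x, divisor=8):\n    return math.ceil(x / divisor) * divisor\n\n\nvariants = {'n6': (0.33, 0.25), 'm6': (0.67, 0.75), 'l6': (1.00, 1.00)}  # (depth, width)\n\nfor name, (gd, gw) in variants.items():\n    repeats = [max(round(n * gd), 1) for n in (3, 6, 9)]\n    widest = make_divisible(1024 * gw)\n    print(f'yolov5{name}: C3 repeats {repeats}, widest channels {widest}')\n"
  },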
  {
    "path": "module/detect/models/hub/yolov5s-ghost.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.33  # model depth multiple\nwidth_multiple: 0.50  # layer channel multiple\nanchors:\n  - [10,13, 16,30, 33,23]  # P3/8\n  - [30,61, 62,45, 59,119]  # P4/16\n  - [116,90, 156,198, 373,326]  # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, GhostConv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3Ghost, [128]],\n   [-1, 1, GhostConv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3Ghost, [256]],\n   [-1, 1, GhostConv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3Ghost, [512]],\n   [-1, 1, GhostConv, [1024, 3, 2]],  # 7-P5/32\n   [-1, 3, C3Ghost, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead:\n  [[-1, 1, GhostConv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3Ghost, [512, False]],  # 13\n\n   [-1, 1, GhostConv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3Ghost, [256, False]],  # 17 (P3/8-small)\n\n   [-1, 1, GhostConv, [256, 3, 2]],\n   [[-1, 14], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3Ghost, [512, False]],  # 20 (P4/16-medium)\n\n   [-1, 1, GhostConv, [512, 3, 2]],\n   [[-1, 10], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3Ghost, [1024, False]],  # 23 (P5/32-large)\n\n   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)\n  ]\n"
  },
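  {
    "path": "docs/examples/ghost_conv_sketch.py",
    "content": "# Editor's illustrative sketch: a hypothetical stand-alone module, not a copy of the\n# GhostConv in models/common.py. It follows the GhostNet idea the config above relies\n# on: compute half of the output channels with a regular conv, derive the other half\n# from them with a cheap depthwise 5x5 conv, then concatenate. The BN + SiLU wrapper\n# of the real Conv block is omitted for brevity.\n\nimport torch\nimport torch.nn as nn\n\n\nclass GhostConvSketch(nn.Module):\n    def __init__(self, c1, c2, k=1, s=1):\n        super().__init__()\n        c_ = c2 // 2  # half the outputs come from the cheap branch\n        self.primary = nn.Conv2d(c1, c_, k, s, k // 2, bias=False)\n        self.cheap = nn.Conv2d(c_, c_, 5, 1, 2, groups=c_, bias=False)  # depthwise\n\n    def forward(self, x):\n        y = self.primary(x)\n        return torch.cat((y, self.cheap(y)), 1)\n\n\nif __name__ == '__main__':\n    m = GhostConvSketch(64, 128, k=3, s=2)\n    print(m(torch.zeros(1, 64, 32, 32)).shape)  # torch.Size([1, 128, 16, 16])\n"
  },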
  {
    "path": "module/detect/models/hub/yolov5s-transformer.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.33  # model depth multiple\nwidth_multiple: 0.50  # layer channel multiple\nanchors:\n  - [10,13, 16,30, 33,23]  # P3/8\n  - [30,61, 62,45, 59,119]  # P4/16\n  - [116,90, 156,198, 373,326]  # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32\n   [-1, 3, C3TR, [1024]],  # 9 <--- C3TR() Transformer module\n   [-1, 1, SPPF, [1024, 5]],  # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead:\n  [[-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 13\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 14], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 10], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)\n\n   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)\n  ]\n"
  },
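  {
    "path": "docs/examples/c3tr_attention_sketch.py",
    "content": "# Editor's illustrative sketch: a simplified stand-alone block, not a copy of the\n# TransformerBlock in models/common.py. C3TR in the config above swaps the C3\n# bottleneck stack for transformer layers over the P5 feature map; the core step is\n# flattening the HxW grid into tokens, applying multi-head self-attention, and\n# restoring the map.\n\nimport torch\nimport torch.nn as nn\n\n\nclass SelfAttention2dSketch(nn.Module):\n    def __init__(self, c, num_heads=4):\n        super().__init__()\n        self.attn = nn.MultiheadAttention(c, num_heads, batch_first=True)\n\n    def forward(self, x):\n        b, c, h, w = x.shape\n        t = x.flatten(2).transpose(1, 2)  # (b, h*w, c): one token per grid cell\n        t, _ = self.attn(t, t, t)  # global spatial self-attention\n        return t.transpose(1, 2).reshape(b, c, h, w)\n\n\nif __name__ == '__main__':\n    m = SelfAttention2dSketch(64)\n    print(m(torch.zeros(2, 64, 20, 20)).shape)  # torch.Size([2, 64, 20, 20])\n"
  },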
  {
    "path": "module/detect/models/hub/yolov5s6.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.33  # model depth multiple\nwidth_multiple: 0.50  # layer channel multiple\nanchors:\n  - [19,27,  44,40,  38,94]  # P3/8\n  - [96,68,  86,152,  180,137]  # P4/16\n  - [140,301,  303,264,  238,542]  # P5/32\n  - [436,615,  739,380,  925,792]  # P6/64\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [768]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 11\n  ]\n\n# YOLOv5 v6.0 head\nhead:\n  [[-1, 1, Conv, [768, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 8], 1, Concat, [1]],  # cat backbone P5\n   [-1, 3, C3, [768, False]],  # 15\n\n   [-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 19\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 20], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 16], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)\n\n   [-1, 1, Conv, [768, 3, 2]],\n   [[-1, 12], 1, Concat, [1]],  # cat head P6\n   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)\n\n   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)\n  ]\n"
  },
  {
    "path": "module/detect/models/hub/yolov5x6.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.33  # model depth multiple\nwidth_multiple: 1.25  # layer channel multiple\nanchors:\n  - [19,27,  44,40,  38,94]  # P3/8\n  - [96,68,  86,152,  180,137]  # P4/16\n  - [140,301,  303,264,  238,542]  # P5/32\n  - [436,615,  739,380,  925,792]  # P6/64\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [768]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 11\n  ]\n\n# YOLOv5 v6.0 head\nhead:\n  [[-1, 1, Conv, [768, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 8], 1, Concat, [1]],  # cat backbone P5\n   [-1, 3, C3, [768, False]],  # 15\n\n   [-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 19\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 23 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 20], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 16], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [768, False]],  # 29 (P5/32-large)\n\n   [-1, 1, Conv, [768, 3, 2]],\n   [[-1, 12], 1, Concat, [1]],  # cat head P6\n   [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)\n\n   [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)\n  ]\n"
  },
  {
    "path": "module/detect/models/tf.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nTensorFlow, Keras and TFLite versions of YOLOv5\nAuthored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127\n\nUsage:\n    $ python models/tf.py --weights yolov5s.pt\n\nExport:\n    $ python path/to/export.py --weights yolov5s.pt --include saved_model pb tflite tfjs\n\"\"\"\n\nimport argparse\nimport sys\nfrom copy import deepcopy\nfrom pathlib import Path\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\n# ROOT = ROOT.relative_to(Path.cwd())  # relative\n\nimport numpy as np\nimport tensorflow as tf\nimport torch\nimport torch.nn as nn\nfrom tensorflow import keras\n\nfrom models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv,\n                           DWConvTranspose2d, Focus, autopad)\nfrom models.experimental import MixConv2d, attempt_load\nfrom models.yolo import Detect\nfrom utils.activations import SiLU\nfrom utils.general import LOGGER, make_divisible, print_args\n\n\nclass TFBN(keras.layers.Layer):\n    # TensorFlow BatchNormalization wrapper\n    def __init__(self, w=None):\n        super().__init__()\n        self.bn = keras.layers.BatchNormalization(\n            beta_initializer=keras.initializers.Constant(w.bias.numpy()),\n            gamma_initializer=keras.initializers.Constant(w.weight.numpy()),\n            moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()),\n            moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()),\n            epsilon=w.eps)\n\n    def call(self, inputs):\n        return self.bn(inputs)\n\n\nclass TFPad(keras.layers.Layer):\n    # Pad inputs in spatial dimensions 1 and 2\n    def __init__(self, pad):\n        super().__init__()\n        if isinstance(pad, int):\n            self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])\n        else:  # tuple/list\n            self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]])\n\n    def call(self, inputs):\n        return tf.pad(inputs, self.pad, mode='constant', constant_values=0)\n\n\nclass TFConv(keras.layers.Layer):\n    # Standard convolution\n    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):\n        # ch_in, ch_out, weights, kernel, stride, padding, groups\n        super().__init__()\n        assert g == 1, \"TF v2.2 Conv2D does not support 'groups' argument\"\n        # TensorFlow convolution padding is inconsistent with PyTorch (e.g. 
k=3 s=2 'SAME' padding)\n        # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch\n        conv = keras.layers.Conv2D(\n            filters=c2,\n            kernel_size=k,\n            strides=s,\n            padding='SAME' if s == 1 else 'VALID',\n            use_bias=not hasattr(w, 'bn'),\n            kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),\n            bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))\n        self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])\n        self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity\n        self.act = activations(w.act) if act else tf.identity\n\n    def call(self, inputs):\n        return self.act(self.bn(self.conv(inputs)))\n\n\nclass TFDWConv(keras.layers.Layer):\n    # Depthwise convolution\n    def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):\n        # ch_in, ch_out, weights, kernel, stride, padding, groups\n        super().__init__()\n        assert c2 % c1 == 0, f'TFDWConv() output={c2} must be a multiple of input={c1} channels'\n        conv = keras.layers.DepthwiseConv2D(\n            kernel_size=k,\n            depth_multiplier=c2 // c1,\n            strides=s,\n            padding='SAME' if s == 1 else 'VALID',\n            use_bias=not hasattr(w, 'bn'),\n            depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),\n            bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))\n        self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])\n        self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity\n        self.act = activations(w.act) if act else tf.identity\n\n    def call(self, inputs):\n        return self.act(self.bn(self.conv(inputs)))\n\n\nclass TFDWConvTranspose2d(keras.layers.Layer):\n    # Depthwise ConvTranspose2d\n    def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):\n        # ch_in, ch_out, weights, kernel, stride, padding, groups\n        super().__init__()\n        assert c1 == c2, f'TFDWConv() output={c2} must be equal to input={c1} channels'\n        assert k == 4 and p1 == 1, 'TFDWConv() only valid for k=4 and p1=1'\n        weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy()\n        self.c1 = c1\n        self.conv = [\n            keras.layers.Conv2DTranspose(filters=1,\n                                         kernel_size=k,\n                                         strides=s,\n                                         padding='VALID',\n                                         output_padding=p2,\n                                         use_bias=True,\n                                         kernel_initializer=keras.initializers.Constant(weight[..., i:i + 1]),\n                                         bias_initializer=keras.initializers.Constant(bias[i])) for i in range(c1)]\n\n    def call(self, inputs):\n        return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1]\n\n\nclass TFFocus(keras.layers.Layer):\n    # Focus wh information into c-space\n    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):\n        # ch_in, ch_out, kernel, stride, padding, groups\n        super().__init__()\n        self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)\n\n    def call(self, inputs):  # x(b,w,h,c) 
-> y(b,w/2,h/2,4c)\n        # inputs = inputs / 255  # normalize 0-255 to 0-1\n        inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]]\n        return self.conv(tf.concat(inputs, 3))\n\n\nclass TFBottleneck(keras.layers.Layer):\n    # Standard bottleneck\n    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None):  # ch_in, ch_out, shortcut, groups, expansion\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)\n        self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)\n        self.add = shortcut and c1 == c2\n\n    def call(self, inputs):\n        return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))\n\n\nclass TFCrossConv(keras.layers.Layer):\n    # Cross Convolution\n    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1)\n        self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2)\n        self.add = shortcut and c1 == c2\n\n    def call(self, inputs):\n        return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))\n\n\nclass TFConv2d(keras.layers.Layer):\n    # Substitution for PyTorch nn.Conv2D\n    def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):\n        super().__init__()\n        assert g == 1, \"TF v2.2 Conv2D does not support 'groups' argument\"\n        self.conv = keras.layers.Conv2D(filters=c2,\n                                        kernel_size=k,\n                                        strides=s,\n                                        padding='VALID',\n                                        use_bias=bias,\n                                        kernel_initializer=keras.initializers.Constant(\n                                            w.weight.permute(2, 3, 1, 0).numpy()),\n                                        bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None)\n\n    def call(self, inputs):\n        return self.conv(inputs)\n\n\nclass TFBottleneckCSP(keras.layers.Layer):\n    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):\n        # ch_in, ch_out, number, shortcut, groups, expansion\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)\n        self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)\n        self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3)\n        self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4)\n        self.bn = TFBN(w.bn)\n        self.act = lambda x: keras.activations.swish(x)\n        self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])\n\n    def call(self, inputs):\n        y1 = self.cv3(self.m(self.cv1(inputs)))\n        y2 = self.cv2(inputs)\n        return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3))))\n\n\nclass TFC3(keras.layers.Layer):\n    # CSP Bottleneck with 3 convolutions\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):\n        # ch_in, ch_out, number, shortcut, groups, expansion\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)\n        self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)\n        self.cv3 = TFConv(2 * c_, c2, 1, 
1, w=w.cv3)\n        self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])\n\n    def call(self, inputs):\n        return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))\n\n\nclass TFC3x(keras.layers.Layer):\n    # 3 module with cross-convolutions\n    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):\n        # ch_in, ch_out, number, shortcut, groups, expansion\n        super().__init__()\n        c_ = int(c2 * e)  # hidden channels\n        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)\n        self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)\n        self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)\n        self.m = keras.Sequential([\n            TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)])\n\n    def call(self, inputs):\n        return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))\n\n\nclass TFSPP(keras.layers.Layer):\n    # Spatial pyramid pooling layer used in YOLOv3-SPP\n    def __init__(self, c1, c2, k=(5, 9, 13), w=None):\n        super().__init__()\n        c_ = c1 // 2  # hidden channels\n        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)\n        self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)\n        self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k]\n\n    def call(self, inputs):\n        x = self.cv1(inputs)\n        return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3))\n\n\nclass TFSPPF(keras.layers.Layer):\n    # Spatial pyramid pooling-Fast layer\n    def __init__(self, c1, c2, k=5, w=None):\n        super().__init__()\n        c_ = c1 // 2  # hidden channels\n        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)\n        self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)\n        self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME')\n\n    def call(self, inputs):\n        x = self.cv1(inputs)\n        y1 = self.m(x)\n        y2 = self.m(y1)\n        return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3))\n\n\nclass TFDetect(keras.layers.Layer):\n    # TF YOLOv5 Detect layer\n    def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None):  # detection layer\n        super().__init__()\n        self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)\n        self.nc = nc  # number of classes\n        self.no = nc + 5  # number of outputs per anchor\n        self.nl = len(anchors)  # number of detection layers\n        self.na = len(anchors[0]) // 2  # number of anchors\n        self.grid = [tf.zeros(1)] * self.nl  # init grid\n        self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32)\n        self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2])\n        self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]\n        self.training = False  # set to False after building model\n        self.imgsz = imgsz\n        for i in range(self.nl):\n            ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]\n            self.grid[i] = self._make_grid(nx, ny)\n\n    def call(self, inputs):\n        z = []  # inference output\n        x = []\n        for i in range(self.nl):\n            x.append(self.m[i](inputs[i]))\n            # x(bs,20,20,255) to x(bs,3,20,20,85)\n            ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]\n            x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no])\n\n           
 if not self.training:  # inference\n                y = tf.sigmoid(x[i])\n                grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5\n                anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4\n                xy = (y[..., 0:2] * 2 + grid) * self.stride[i]  # xy\n                wh = y[..., 2:4] ** 2 * anchor_grid\n                # Normalize xywh to 0-1 to reduce calibration error\n                xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)\n                wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)\n                y = tf.concat([xy, wh, y[..., 4:]], -1)\n                z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no]))\n\n        return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), x)\n\n    @staticmethod\n    def _make_grid(nx=20, ny=20):\n        # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])\n        # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()\n        xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny))\n        return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32)\n\n\nclass TFUpsample(keras.layers.Layer):\n    # TF version of torch.nn.Upsample()\n    def __init__(self, size, scale_factor, mode, w=None):  # warning: all arguments needed including 'w'\n        super().__init__()\n        assert scale_factor == 2, \"scale_factor must be 2\"\n        self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode)\n        # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)\n        # with default arguments: align_corners=False, half_pixel_centers=False\n        # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x,\n        #                                                            size=(x.shape[1] * 2, x.shape[2] * 2))\n\n    def call(self, inputs):\n        return self.upsample(inputs)\n\n\nclass TFConcat(keras.layers.Layer):\n    # TF version of torch.concat()\n    def __init__(self, dimension=1, w=None):\n        super().__init__()\n        assert dimension == 1, \"convert only NCHW to NHWC concat\"\n        self.d = 3\n\n    def call(self, inputs):\n        return tf.concat(inputs, self.d)\n\n\ndef parse_model(d, ch, model, imgsz):  # model_dict, input_channels(3)\n    LOGGER.info(f\"\\n{'':>3}{'from':>18}{'n':>3}{'params':>10}  {'module':<40}{'arguments':<30}\")\n    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']\n    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors\n    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)\n\n    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out\n    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args\n        m_str = m\n        m = eval(m) if isinstance(m, str) else m  # eval strings\n        for j, a in enumerate(args):\n            try:\n                args[j] = eval(a) if isinstance(a, str) else a  # eval strings\n            except NameError:\n                pass\n\n        n = max(round(n * gd), 1) if n > 1 else n  # depth gain\n        if m in [\n                nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv,\n                BottleneckCSP, C3, C3x]:\n            c1, c2 = ch[f], args[0]\n            c2 = make_divisible(c2 * gw, 8) if c2 != no else c2\n\n            args = [c1, c2, 
*args[1:]]\n            if m in [BottleneckCSP, C3, C3x]:\n                args.insert(2, n)\n                n = 1\n        elif m is nn.BatchNorm2d:\n            args = [ch[f]]\n        elif m is Concat:\n            c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)\n        elif m is Detect:\n            args.append([ch[x + 1] for x in f])\n            if isinstance(args[1], int):  # number of anchors\n                args[1] = [list(range(args[1] * 2))] * len(f)\n            args.append(imgsz)\n        else:\n            c2 = ch[f]\n\n        tf_m = eval('TF' + m_str.replace('nn.', ''))\n        m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \\\n            else tf_m(*args, w=model.model[i])  # module\n\n        torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module\n        t = str(m)[8:-2].replace('__main__.', '')  # module type\n        np = sum(x.numel() for x in torch_m_.parameters())  # number params\n        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params\n        LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10}  {t:<40}{str(args):<30}')  # print\n        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist\n        layers.append(m_)\n        ch.append(c2)\n    return keras.Sequential(layers), sorted(save)\n\n\nclass TFModel:\n    # TF YOLOv5 model\n    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)):  # model, channels, classes\n        super().__init__()\n        if isinstance(cfg, dict):\n            self.yaml = cfg  # model dict\n        else:  # is *.yaml\n            import yaml  # for torch hub\n            self.yaml_file = Path(cfg).name\n            with open(cfg) as f:\n                self.yaml = yaml.load(f, Loader=yaml.FullLoader)  # model dict\n\n        # Define model\n        if nc and nc != self.yaml['nc']:\n            LOGGER.info(f\"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}\")\n            self.yaml['nc'] = nc  # override yaml value\n        self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)\n\n    def predict(self,\n                inputs,\n                tf_nms=False,\n                agnostic_nms=False,\n                topk_per_class=100,\n                topk_all=100,\n                iou_thres=0.45,\n                conf_thres=0.25):\n        y = []  # outputs\n        x = inputs\n        for m in self.model.layers:\n            if m.f != -1:  # if not from previous layer\n                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers\n\n            x = m(x)  # run\n            y.append(x if m.i in self.savelist else None)  # save output\n\n        # Add TensorFlow NMS\n        if tf_nms:\n            boxes = self._xywh2xyxy(x[0][..., :4])\n            probs = x[0][:, :, 4:5]\n            classes = x[0][:, :, 5:]\n            scores = probs * classes\n            if agnostic_nms:\n                nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres)\n            else:\n                boxes = tf.expand_dims(boxes, 2)\n                nms = tf.image.combined_non_max_suppression(boxes,\n                                                            scores,\n                                                            topk_per_class,\n                                                            topk_all,\n                                
                            iou_thres,\n                                                            conf_thres,\n                                                            clip_boxes=False)\n            return nms, x[1]\n        return x[0]  # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...]\n        # x = x[0][0]  # [x(1,6300,85), ...] to x(6300,85)\n        # xywh = x[..., :4]  # x(6300,4) boxes\n        # conf = x[..., 4:5]  # x(6300,1) confidences\n        # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1))  # x(6300,1)  classes\n        # return tf.concat([conf, cls, xywh], 1)\n\n    @staticmethod\n    def _xywh2xyxy(xywh):\n        # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n        x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1)\n        return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)\n\n\nclass AgnosticNMS(keras.layers.Layer):\n    # TF Agnostic NMS\n    def call(self, input, topk_all, iou_thres, conf_thres):\n        # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450\n        return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres),\n                         input,\n                         fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32),\n                         name='agnostic_nms')\n\n    @staticmethod\n    def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25):  # agnostic NMS\n        boxes, classes, scores = x\n        class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32)\n        scores_inp = tf.reduce_max(scores, -1)\n        selected_inds = tf.image.non_max_suppression(boxes,\n                                                     scores_inp,\n                                                     max_output_size=topk_all,\n                                                     iou_threshold=iou_thres,\n                                                     score_threshold=conf_thres)\n        selected_boxes = tf.gather(boxes, selected_inds)\n        padded_boxes = tf.pad(selected_boxes,\n                              paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]],\n                              mode=\"CONSTANT\",\n                              constant_values=0.0)\n        selected_scores = tf.gather(scores_inp, selected_inds)\n        padded_scores = tf.pad(selected_scores,\n                               paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],\n                               mode=\"CONSTANT\",\n                               constant_values=-1.0)\n        selected_classes = tf.gather(class_inds, selected_inds)\n        padded_classes = tf.pad(selected_classes,\n                                paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],\n                                mode=\"CONSTANT\",\n                                constant_values=-1.0)\n        valid_detections = tf.shape(selected_inds)[0]\n        return padded_boxes, padded_scores, padded_classes, valid_detections\n\n\ndef activations(act=nn.SiLU):\n    # Returns TF activation from input PyTorch activation\n    if isinstance(act, nn.LeakyReLU):\n        return lambda x: keras.activations.relu(x, alpha=0.1)\n    elif isinstance(act, nn.Hardswish):\n        return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667\n    elif isinstance(act, (nn.SiLU, SiLU)):\n        return lambda x: keras.activations.swish(x)\n    else:\n        raise Exception(f'no matching 
TensorFlow activation found for PyTorch activation {act}')\n\n\ndef representative_dataset_gen(dataset, ncalib=100):\n    # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays\n    for n, (path, img, im0s, vid_cap, string) in enumerate(dataset):\n        im = np.transpose(img, [1, 2, 0])\n        im = np.expand_dims(im, axis=0).astype(np.float32)\n        im /= 255\n        yield [im]\n        if n >= ncalib:\n            break\n\n\ndef run(\n        weights=ROOT / 'yolov5s.pt',  # weights path\n        imgsz=(640, 640),  # inference size h,w\n        batch_size=1,  # batch size\n        dynamic=False,  # dynamic batch size\n):\n    # PyTorch model\n    im = torch.zeros((batch_size, 3, *imgsz))  # BCHW image\n    model = attempt_load(weights, device=torch.device('cpu'), inplace=True, fuse=False)\n    _ = model(im)  # inference\n    model.info()\n\n    # TensorFlow model\n    im = tf.zeros((batch_size, *imgsz, 3))  # BHWC image\n    tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)\n    _ = tf_model.predict(im)  # inference\n\n    # Keras model\n    im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)\n    keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im))\n    keras_model.summary()\n\n    LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\\nUse export.py for TF model export.')\n\n\ndef parse_opt():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')\n    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')\n    parser.add_argument('--batch-size', type=int, default=1, help='batch size')\n    parser.add_argument('--dynamic', action='store_true', help='dynamic batch size')\n    opt = parser.parse_args()\n    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand\n    print_args(vars(opt))\n    return opt\n\n\ndef main(opt):\n    run(**vars(opt))\n\n\nif __name__ == \"__main__\":\n    opt = parse_opt()\n    main(opt)\n"
  },
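  {
    "path": "docs/examples/torch_to_keras_weights.py",
    "content": "# Editor's illustrative sketch: a hypothetical helper file, not part of the original\n# repo. models/tf.py above rebuilds each Keras layer from trained PyTorch weights,\n# permuting conv kernels from the PyTorch (out, in, kH, kW) layout to the Keras\n# (kH, kW, in, out) layout via w.permute(2, 3, 1, 0). The same axis move with numpy:\n\nimport numpy as np\n\nw_torch = np.zeros((32, 16, 3, 3))  # PyTorch OIHW: (out_ch, in_ch, kH, kW)\nw_keras = np.transpose(w_torch, (2, 3, 1, 0))  # Keras HWIO: (kH, kW, in_ch, out_ch)\nprint(w_torch.shape, '->', w_keras.shape)  # (32, 16, 3, 3) -> (3, 3, 16, 32)\n"
  },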
  {
    "path": "module/detect/models/yolo.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nYOLO-specific modules\n\nUsage:\n    $ python path/to/models/yolo.py --cfg yolov5s.yaml\n\"\"\"\n\nimport argparse\nimport contextlib\nimport os\nimport platform\nimport sys\nfrom copy import deepcopy\nfrom pathlib import Path\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\nif platform.system() != 'Windows':\n    ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative\n\nfrom models.common import *\nfrom models.experimental import *\nfrom utils.autoanchor import check_anchor_order\nfrom utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args\nfrom utils.plots import feature_visualization\nfrom utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,\n                               time_sync)\n\ntry:\n    import thop  # for FLOPs computation\nexcept ImportError:\n    thop = None\n\n\nclass Detect(nn.Module):\n    stride = None  # strides computed during build\n    onnx_dynamic = False  # ONNX export parameter\n    export = False  # export mode\n\n    def __init__(self, nc=80, anchors=(), ch=(), inplace=True):  # detection layer\n        super().__init__()\n        self.nc = nc  # number of classes\n        self.no = nc + 5  # number of outputs per anchor\n        self.nl = len(anchors)  # number of detection layers\n        self.na = len(anchors[0]) // 2  # number of anchors\n        self.grid = [torch.zeros(1)] * self.nl  # init grid\n        self.anchor_grid = [torch.zeros(1)] * self.nl  # init anchor grid\n        self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2))  # shape(nl,na,2)\n        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv\n        self.inplace = inplace  # use in-place ops (e.g. 
slice assignment)\n\n    def forward(self, x):\n        z = []  # inference output\n        for i in range(self.nl):\n            x[i] = self.m[i](x[i])  # conv\n            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)\n            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()\n\n            if not self.training:  # inference\n                if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:\n                    self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)\n\n                y = x[i].sigmoid()\n                if self.inplace:\n                    y[..., 0:2] = (y[..., 0:2] * 2 + self.grid[i]) * self.stride[i]  # xy\n                    y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh\n                else:  # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953\n                    xy, wh, conf = y.split((2, 2, self.nc + 1), 4)  # y.tensor_split((2, 4, 5), 4)  # torch 1.8.0\n                    xy = (xy * 2 + self.grid[i]) * self.stride[i]  # xy\n                    wh = (wh * 2) ** 2 * self.anchor_grid[i]  # wh\n                    y = torch.cat((xy, wh, conf), 4)\n                z.append(y.view(bs, -1, self.no))\n\n        return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)\n\n    def _make_grid(self, nx=20, ny=20, i=0):\n        d = self.anchors[i].device\n        t = self.anchors[i].dtype\n        shape = 1, self.na, ny, nx, 2  # grid shape\n        y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)\n        if check_version(torch.__version__, '1.10.0'):  # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility\n            yv, xv = torch.meshgrid(y, x, indexing='ij')\n        else:\n            yv, xv = torch.meshgrid(y, x)\n        grid = torch.stack((xv, yv), 2).expand(shape) - 0.5  # add grid offset, i.e. 
y = 2.0 * x - 0.5\n        anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)\n        return grid, anchor_grid\n\n\nclass Model(nn.Module):\n    # YOLOv5 model\n    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes\n        super().__init__()\n        if isinstance(cfg, dict):\n            self.yaml = cfg  # model dict\n        else:  # is *.yaml\n            import yaml  # for torch hub\n            self.yaml_file = Path(cfg).name\n            with open(cfg, encoding='ascii', errors='ignore') as f:\n                self.yaml = yaml.safe_load(f)  # model dict\n\n        # Define model\n        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels\n        if nc and nc != self.yaml['nc']:\n            LOGGER.info(f\"overriding model.yaml nc={self.yaml['nc']} with nc={nc}\")\n            self.yaml['nc'] = nc  # override yaml value\n        if anchors:\n            LOGGER.info(f'overriding model.yaml anchors with anchors={anchors}')\n            self.yaml['anchors'] = round(anchors)  # override yaml value\n        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist\n        self.names = [str(i) for i in range(self.yaml['nc'])]  # default names\n        self.inplace = self.yaml.get('inplace', True)\n\n        # Build strides, anchors\n        m = self.model[-1]  # Detect()\n        if isinstance(m, Detect):\n            s = 256  # 2x min stride\n            m.inplace = self.inplace\n            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward\n            check_anchor_order(m)  # must be in pixel-space (not grid-space)\n            m.anchors /= m.stride.view(-1, 1, 1)\n            self.stride = m.stride\n            self._initialize_biases()  # only run once\n\n        # Init weights, biases\n        initialize_weights(self)\n        self.info()\n        LOGGER.debug('')\n\n    def forward(self, x, augment=False, profile=False, visualize=False):\n        if augment:\n            return self._forward_augment(x)  # augmented inference, None\n        return self._forward_once(x, profile, visualize)  # single-scale inference, train\n\n    def _forward_augment(self, x):\n        img_size = x.shape[-2:]  # height, width\n        s = [1, 0.83, 0.67]  # scales\n        f = [None, 3, None]  # flips (2-ud, 3-lr)\n        y = []  # outputs\n        for si, fi in zip(s, f):\n            xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))\n            yi = self._forward_once(xi)[0]  # forward\n            # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save\n            yi = self._descale_pred(yi, fi, si, img_size)\n            y.append(yi)\n        y = self._clip_augmented(y)  # clip augmented tails\n        return torch.cat(y, 1), None  # augmented inference, train\n\n    def _forward_once(self, x, profile=False, visualize=False):\n        y, dt = [], []  # outputs\n        for m in self.model:\n            if m.f != -1:  # if not from previous layer\n                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers\n            if profile:\n                self._profile_one_layer(m, x, dt)\n            x = m(x)  # run\n            y.append(x if m.i in self.save else None)  # save output\n            if visualize:\n                feature_visualization(x, m.type, m.i, save_dir=visualize)\n     
   return x\n\n    def _descale_pred(self, p, flips, scale, img_size):\n        # de-scale predictions following augmented inference (inverse operation)\n        if self.inplace:\n            p[..., :4] /= scale  # de-scale\n            if flips == 2:\n                p[..., 1] = img_size[0] - p[..., 1]  # de-flip ud\n            elif flips == 3:\n                p[..., 0] = img_size[1] - p[..., 0]  # de-flip lr\n        else:\n            x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale  # de-scale\n            if flips == 2:\n                y = img_size[0] - y  # de-flip ud\n            elif flips == 3:\n                x = img_size[1] - x  # de-flip lr\n            p = torch.cat((x, y, wh, p[..., 4:]), -1)\n        return p\n\n    def _clip_augmented(self, y):\n        # Clip YOLOv5 augmented inference tails\n        nl = self.model[-1].nl  # number of detection layers (P3-P5)\n        g = sum(4 ** x for x in range(nl))  # grid points\n        e = 1  # exclude layer count\n        i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e))  # indices\n        y[0] = y[0][:, :-i]  # large\n        i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e))  # indices\n        y[-1] = y[-1][:, i:]  # small\n        return y\n\n    def _profile_one_layer(self, m, x, dt):\n        c = isinstance(m, Detect)  # is final layer, copy input as inplace fix\n        o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPs\n        t = time_sync()\n        for _ in range(10):\n            m(x.copy() if c else x)\n        dt.append((time_sync() - t) * 100)\n        if m == self.model[0]:\n            LOGGER.info(f\"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s}  module\")\n        LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f}  {m.type}')\n        if c:\n            LOGGER.info(f\"{sum(dt):10.2f} {'-':>10s} {'-':>10s}  Total\")\n\n    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency\n        # https://arxiv.org/abs/1708.02002 section 3.3\n        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n        m = self.model[-1]  # Detect() module\n        for mi, s in zip(m.m, m.stride):  # from\n            b = mi.bias.view(m.na, -1).detach()  # conv.bias(255) to (3,85)\n            b[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)\n            b[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum())  # cls\n            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n    def _print_biases(self):\n        m = self.model[-1]  # Detect() module\n        for mi in m.m:  # from\n            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)\n            LOGGER.info(\n                ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())\n            )\n\n    # def _print_weights(self):\n    #     for m in self.model.modules():\n    #         if type(m) is Bottleneck:\n    #             LOGGER.info('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights\n\n    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers\n        LOGGER.info('fuse yolo layers (conv & bn)')\n        for m in self.model.modules():\n            if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):\n                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv\n                delattr(m, 'bn')  # remove 
batchnorm\n                m.forward = m.forward_fuse  # update forward\n        self.info()\n        return self\n\n    def info(self, verbose=False, img_size=640):  # print model information\n        model_info(self, verbose, img_size)\n\n    def _apply(self, fn):\n        # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers\n        self = super()._apply(fn)\n        m = self.model[-1]  # Detect()\n        if isinstance(m, Detect):\n            m.stride = fn(m.stride)\n            m.grid = list(map(fn, m.grid))\n            if isinstance(m.anchor_grid, list):\n                m.anchor_grid = list(map(fn, m.anchor_grid))\n        return self\n\n\ndef parse_model(d, ch):  # model_dict, input_channels(3)\n    LOGGER.debug(f\"\\n{'':>3}{'from':>18}{'n':>3}{'params':>10}  {'module':<40}{'arguments':<30}\")\n    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']\n    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors\n    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)\n\n    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out\n    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args\n        m = eval(m) if isinstance(m, str) else m  # eval strings\n        for j, a in enumerate(args):\n            with contextlib.suppress(NameError):\n                args[j] = eval(a) if isinstance(a, str) else a  # eval strings\n\n        n = n_ = max(round(n * gd), 1) if n > 1 else n  # depth gain\n        if m in (Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,\n                 BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x):\n            c1, c2 = ch[f], args[0]\n            if c2 != no:  # if not output\n                c2 = make_divisible(c2 * gw, 8)\n\n            args = [c1, c2, *args[1:]]\n            if m in [BottleneckCSP, C3, C3TR, C3Ghost, C3x]:\n                args.insert(2, n)  # number of repeats\n                n = 1\n        elif m is nn.BatchNorm2d:\n            args = [ch[f]]\n        elif m is Concat:\n            c2 = sum(ch[x] for x in f)\n        elif m is Detect:\n            args.append([ch[x] for x in f])\n            if isinstance(args[1], int):  # number of anchors\n                args[1] = [list(range(args[1] * 2))] * len(f)\n        elif m is Contract:\n            c2 = ch[f] * args[0] ** 2\n        elif m is Expand:\n            c2 = ch[f] // args[0] ** 2\n        else:\n            c2 = ch[f]\n\n        m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module\n        t = str(m)[8:-2].replace('__main__.', '')  # module type\n        np = sum(x.numel() for x in m_.parameters())  # number params\n        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params\n        LOGGER.debug(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f}  {t:<40}{str(args):<30}')  # print\n        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist\n        layers.append(m_)\n        if i == 0:\n            ch = []\n        ch.append(c2)\n    return nn.Sequential(*layers), sorted(save)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')\n    parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all 
GPUs')\n    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')\n    parser.add_argument('--profile', action='store_true', help='profile model speed')\n    parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')\n    parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')\n    opt = parser.parse_args()\n    opt.cfg = check_yaml(opt.cfg)  # check YAML\n    print_args(vars(opt))\n    device = select_device(opt.device)\n\n    # Create model\n    im = torch.rand(opt.batch_size, 3, 640, 640).to(device)\n    model = Model(opt.cfg).to(device)\n\n    # Options\n    if opt.line_profile:  # profile layer by layer\n        _ = model(im, profile=True)\n\n    elif opt.profile:  # profile forward-backward\n        results = profile(input=im, ops=[model], n=3)\n\n    elif opt.test:  # test all models\n        for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'):\n            try:\n                _ = Model(cfg)\n            except Exception as e:\n                print(f'Error in {cfg}: {e}')\n\n    else:  # report fused model summary\n        model.fuse()\n"
  },
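  {
    "path": "module/detect/examples/fuse_conv_bn_sketch.py",
    "content": "# NOTE: hypothetical example file added for illustration; not part of the upstream YOLOv5/TarDAL sources.\n# A minimal sketch of the Conv2d + BatchNorm2d folding that Model.fuse() performs via\n# utils.torch_utils.fuse_conv_and_bn: in eval mode BatchNorm is a per-channel affine map,\n# so it can be absorbed into the preceding convolution's weight and bias.\nimport torch\nimport torch.nn as nn\n\n\ndef fuse_conv_bn_sketch(conv, bn):\n    # Build a Conv2d whose output equals bn(conv(x)) in eval mode\n    fused = nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,\n                      stride=conv.stride, padding=conv.padding, groups=conv.groups, bias=True)\n    with torch.no_grad():\n        scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)  # gamma / sqrt(var + eps), per channel\n        fused.weight.copy_(conv.weight * scale.reshape(-1, 1, 1, 1))\n        b_conv = torch.zeros(conv.out_channels) if conv.bias is None else conv.bias\n        fused.bias.copy_((b_conv - bn.running_mean) * scale + bn.bias)\n    return fused\n\n\nif __name__ == '__main__':\n    conv, bn = nn.Conv2d(3, 8, 3, bias=False).eval(), nn.BatchNorm2d(8).eval()\n    x = torch.randn(1, 3, 16, 16)\n    print(torch.allclose(bn(conv(x)), fuse_conv_bn_sketch(conv, bn)(x), atol=1e-6))  # True\n"
  },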
  {
    "path": "module/detect/models/yolov5l.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.0  # model depth multiple\nwidth_multiple: 1.0  # layer channel multiple\nanchors:\n  - [10,13, 16,30, 33,23]  # P3/8\n  - [30,61, 62,45, 59,119]  # P4/16\n  - [116,90, 156,198, 373,326]  # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead:\n  [[-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 13\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 14], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 10], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)\n\n   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "module/detect/models/yolov5m.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.67  # model depth multiple\nwidth_multiple: 0.75  # layer channel multiple\nanchors:\n  - [10,13, 16,30, 33,23]  # P3/8\n  - [30,61, 62,45, 59,119]  # P4/16\n  - [116,90, 156,198, 373,326]  # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead:\n  [[-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 13\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 14], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 10], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)\n\n   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "module/detect/models/yolov5n.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.33  # model depth multiple\nwidth_multiple: 0.25  # layer channel multiple\nanchors:\n  - [10,13, 16,30, 33,23]  # P3/8\n  - [30,61, 62,45, 59,119]  # P4/16\n  - [116,90, 156,198, 373,326]  # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead:\n  [[-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 13\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 14], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 10], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)\n\n   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "module/detect/models/yolov5s.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 0.33  # model depth multiple\nwidth_multiple: 0.50  # layer channel multiple\nanchors:\n  - [10,13, 16,30, 33,23]  # P3/8\n  - [30,61, 62,45, 59,119]  # P4/16\n  - [116,90, 156,198, 373,326]  # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead:\n  [[-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 13\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 14], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 10], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)\n\n   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)\n  ]\n"
  },
  {
    "path": "module/detect/models/yolov5x.yaml",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\n# Parameters\nnc: 80  # number of classes\ndepth_multiple: 1.33  # model depth multiple\nwidth_multiple: 1.25  # layer channel multiple\nanchors:\n  - [10,13, 16,30, 33,23]  # P3/8\n  - [30,61, 62,45, 59,119]  # P4/16\n  - [116,90, 156,198, 373,326]  # P5/32\n\n# YOLOv5 v6.0 backbone\nbackbone:\n  # [from, number, module, args]\n  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2\n   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4\n   [-1, 3, C3, [128]],\n   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8\n   [-1, 6, C3, [256]],\n   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16\n   [-1, 9, C3, [512]],\n   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32\n   [-1, 3, C3, [1024]],\n   [-1, 1, SPPF, [1024, 5]],  # 9\n  ]\n\n# YOLOv5 v6.0 head\nhead:\n  [[-1, 1, Conv, [512, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 6], 1, Concat, [1]],  # cat backbone P4\n   [-1, 3, C3, [512, False]],  # 13\n\n   [-1, 1, Conv, [256, 1, 1]],\n   [-1, 1, nn.Upsample, [None, 2, 'nearest']],\n   [[-1, 4], 1, Concat, [1]],  # cat backbone P3\n   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)\n\n   [-1, 1, Conv, [256, 3, 2]],\n   [[-1, 14], 1, Concat, [1]],  # cat head P4\n   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)\n\n   [-1, 1, Conv, [512, 3, 2]],\n   [[-1, 10], 1, Concat, [1]],  # cat head P5\n   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)\n\n   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)\n  ]\n"
  },
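  {
    "path": "module/detect/examples/model_scaling_sketch.py",
    "content": "# NOTE: hypothetical example file added for illustration; not part of the upstream YOLOv5/TarDAL sources.\n# The five model YAMLs above share one architecture and differ only in depth_multiple (gd) and\n# width_multiple (gw). parse_model() in models/yolo.py scales repeat counts by gd and channel\n# counts by gw (rounded up to a multiple of 8), as sketched here for two representative layers.\nimport math\n\n\ndef make_divisible(x, divisor=8):\n    # Round up to the nearest multiple of divisor (same formula as utils.general.make_divisible)\n    return math.ceil(x / divisor) * divisor\n\n\nVARIANTS = {  # name: (depth_multiple, width_multiple), taken from the YAMLs above\n    'yolov5n': (0.33, 0.25),\n    'yolov5s': (0.33, 0.50),\n    'yolov5m': (0.67, 0.75),\n    'yolov5l': (1.00, 1.00),\n    'yolov5x': (1.33, 1.25)}\n\nfor name, (gd, gw) in VARIANTS.items():\n    repeats = max(round(9 * gd), 1)       # the 9-repeat C3 block at backbone index 6\n    channels = make_divisible(1024 * gw)  # the 1024-channel SPPF at backbone index 9\n    print(f'{name}: C3 x{repeats}, SPPF {channels} channels')\n"
  },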
  {
    "path": "module/detect/requirements.txt",
    "content": "# YOLOv5 requirements\n# Usage: pip install -r requirements.txt\n\n# Base ----------------------------------------\nmatplotlib>=3.2.2\nnumpy>=1.18.5\nopencv-python>=4.1.1\nPillow>=7.1.2\nPyYAML>=5.3.1\nrequests>=2.23.0\nscipy>=1.4.1\ntorch>=1.7.0\ntorchvision>=0.8.1\ntqdm>=4.64.0\nprotobuf<4.21.3  # https://github.com/ultralytics/yolov5/issues/8012\n\n# Logging -------------------------------------\ntensorboard>=2.4.1\n# wandb\n\n# Plotting ------------------------------------\npandas>=1.1.4\nseaborn>=0.11.0\n\n# Export --------------------------------------\n# coremltools>=4.1  # CoreML export\n# onnx>=1.9.0  # ONNX export\n# onnx-simplifier>=0.4.1  # ONNX simplifier\n# nvidia-pyindex  # TensorRT export\n# nvidia-tensorrt  # TensorRT export\n# scikit-learn==0.19.2  # CoreML quantization\n# tensorflow>=2.4.1  # TFLite export\n# tensorflowjs>=3.9.0  # TF.js export\n# openvino-dev  # OpenVINO export\n\n# Extras --------------------------------------\nipython  # interactive notebook\npsutil  # system utilization\nthop>=0.1.1  # FLOPs computation\n# albumentations>=1.0.3\n# pycocotools>=2.0  # COCO mAP\n# roboflow\n"
  },
  {
    "path": "module/detect/utils/__init__.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\r\n\"\"\"\r\nutils/initialization\r\n\"\"\"\r\n\r\n\r\ndef notebook_init(verbose=True):\r\n    # Check system software and hardware\r\n    print('Checking setup...')\r\n\r\n    import os\r\n    import shutil\r\n\r\n    from utils.general import check_requirements, emojis, is_colab\r\n    from utils.torch_utils import select_device  # imports\r\n\r\n    check_requirements(('psutil', 'IPython'))\r\n    import psutil\r\n    from IPython import display  # to display images and clear console output\r\n\r\n    if is_colab():\r\n        shutil.rmtree('/content/sample_data', ignore_errors=True)  # remove colab /sample_data directory\r\n\r\n    # System info\r\n    if verbose:\r\n        gb = 1 << 30  # bytes to GiB (1024 ** 3)\r\n        ram = psutil.virtual_memory().total\r\n        total, used, free = shutil.disk_usage(\"/\")\r\n        display.clear_output()\r\n        s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)'\r\n    else:\r\n        s = ''\r\n\r\n    select_device(newline=False)\r\n    print(emojis(f'Setup complete ✅ {s}'))\r\n    return display\r\n"
  },
  {
    "path": "module/detect/utils/activations.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nActivation functions\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass SiLU(nn.Module):\n    # SiLU activation https://arxiv.org/pdf/1606.08415.pdf\n    @staticmethod\n    def forward(x):\n        return x * torch.sigmoid(x)\n\n\nclass Hardswish(nn.Module):\n    # Hard-SiLU activation\n    @staticmethod\n    def forward(x):\n        # return x * F.hardsigmoid(x)  # for TorchScript and CoreML\n        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for TorchScript, CoreML and ONNX\n\n\nclass Mish(nn.Module):\n    # Mish activation https://github.com/digantamisra98/Mish\n    @staticmethod\n    def forward(x):\n        return x * F.softplus(x).tanh()\n\n\nclass MemoryEfficientMish(nn.Module):\n    # Mish activation memory-efficient\n    class F(torch.autograd.Function):\n\n        @staticmethod\n        def forward(ctx, x):\n            ctx.save_for_backward(x)\n            return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))\n\n        @staticmethod\n        def backward(ctx, grad_output):\n            x = ctx.saved_tensors[0]\n            sx = torch.sigmoid(x)\n            fx = F.softplus(x).tanh()\n            return grad_output * (fx + x * sx * (1 - fx * fx))\n\n    def forward(self, x):\n        return self.F.apply(x)\n\n\nclass FReLU(nn.Module):\n    # FReLU activation https://arxiv.org/abs/2007.11824\n    def __init__(self, c1, k=3):  # ch_in, kernel\n        super().__init__()\n        self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)\n        self.bn = nn.BatchNorm2d(c1)\n\n    def forward(self, x):\n        return torch.max(x, self.bn(self.conv(x)))\n\n\nclass AconC(nn.Module):\n    r\"\"\" ACON activation (activate or not)\n    AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter\n    according to \"Activate or Not: Learning Customized Activation\" <https://arxiv.org/pdf/2009.04759.pdf>.\n    \"\"\"\n\n    def __init__(self, c1):\n        super().__init__()\n        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))\n        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))\n        self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))\n\n    def forward(self, x):\n        dpx = (self.p1 - self.p2) * x\n        return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x\n\n\nclass MetaAconC(nn.Module):\n    r\"\"\" ACON activation (activate or not)\n    MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network\n    according to \"Activate or Not: Learning Customized Activation\" <https://arxiv.org/pdf/2009.04759.pdf>.\n    \"\"\"\n\n    def __init__(self, c1, k=1, s=1, r=16):  # ch_in, kernel, stride, r\n        super().__init__()\n        c2 = max(r, c1 // r)\n        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))\n        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))\n        self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)\n        self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)\n        # self.bn1 = nn.BatchNorm2d(c2)\n        # self.bn2 = nn.BatchNorm2d(c1)\n\n    def forward(self, x):\n        y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)\n        # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891\n        # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y)))))  # bug/unstable\n        beta = torch.sigmoid(self.fc2(self.fc1(y)))  # bug patch BN layers removed\n        dpx = (self.p1 - self.p2) * x\n        return dpx * 
torch.sigmoid(beta * dpx) + self.p2 * x\n"
  },
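  {
    "path": "module/detect/examples/activations_check_sketch.py",
    "content": "# NOTE: hypothetical example file added for illustration; not part of the upstream YOLOv5/TarDAL sources.\n# The classes in utils/activations.py are drop-in nn.Module replacements. SiLU here matches\n# torch's built-in nn.SiLU, and the export-friendly Hardswish matches nn.Hardswish, since\n# x * hardtanh(x + 3, 0, 6) / 6 is exactly x * relu6(x + 3) / 6.\nimport torch\nimport torch.nn as nn\n\nfrom utils.activations import Hardswish, SiLU  # run from module/detect/ so utils is importable\n\nx = torch.randn(8)\nprint(torch.allclose(SiLU()(x), nn.SiLU()(x)))            # True\nprint(torch.allclose(Hardswish()(x), nn.Hardswish()(x)))  # True\n"
  },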
  {
    "path": "module/detect/utils/augmentations.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nImage augmentation functions\n\"\"\"\n\nimport math\nimport random\n\nimport cv2\nimport numpy as np\nfrom utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box\nfrom utils.metrics import bbox_ioa\n\n\nclass Albumentations:\n    # YOLOv5 Albumentations class (optional, only used if package is installed)\n    def __init__(self):\n        self.transform = None\n        try:\n            import albumentations as A\n            check_version(A.__version__, '1.0.3', hard=True)  # version requirement\n\n            T = [\n                A.Blur(p=0.01),\n                A.MedianBlur(p=0.01),\n                A.ToGray(p=0.01),\n                A.CLAHE(p=0.01),\n                A.RandomBrightnessContrast(p=0.0),\n                A.RandomGamma(p=0.0),\n                A.ImageCompression(quality_lower=75, p=0.0)]  # transforms\n            self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))\n\n            LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))\n        except ImportError:  # package not installed, skip\n            pass\n        except Exception as e:\n            LOGGER.info(colorstr('albumentations: ') + f'{e}')\n\n    def __call__(self, im, labels, p=1.0):\n        if self.transform and random.random() < p:\n            new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0])  # transformed\n            im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])\n        return im, labels\n\n\ndef augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):\n    # HSV color-space augmentation\n    if hgain or sgain or vgain:\n        r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains\n        hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))\n        dtype = im.dtype  # uint8\n\n        x = np.arange(0, 256, dtype=r.dtype)\n        lut_hue = ((x * r[0]) % 180).astype(dtype)\n        lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n        lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n        im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))\n        cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im)  # no return needed\n\n\ndef hist_equalize(im, clahe=True, bgr=False):\n    # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255\n    yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)\n    if clahe:\n        c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n        yuv[:, :, 0] = c.apply(yuv[:, :, 0])\n    else:\n        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])  # equalize Y channel histogram\n    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB)  # convert YUV image to RGB\n\n\ndef replicate(im, labels):\n    # Replicate labels\n    h, w = im.shape[:2]\n    boxes = labels[:, 1:].astype(int)\n    x1, y1, x2, y2 = boxes.T\n    s = ((x2 - x1) + (y2 - y1)) / 2  # side length (pixels)\n    for i in s.argsort()[:round(s.size * 0.5)]:  # smallest indices\n        x1b, y1b, x2b, y2b = boxes[i]\n        bh, bw = y2b - y1b, x2b - x1b\n        yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))  # offset x, y\n        x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]\n        im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b]  # im4[ymin:ymax, xmin:xmax]\n        labels = 
np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)\n\n    return im, labels\n\n\ndef letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):\n    # Resize and pad image while meeting stride-multiple constraints\n    shape = im.shape[:2]  # current shape [height, width]\n    if isinstance(new_shape, int):\n        new_shape = (new_shape, new_shape)\n\n    # Scale ratio (new / old)\n    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n    if not scaleup:  # only scale down, do not scale up (for better eval mAP)\n        r = min(r, 1.0)\n\n    # Compute padding\n    ratio = r, r  # width, height ratios\n    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding\n    if auto:  # minimum rectangle\n        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding\n    elif scaleFill:  # stretch\n        dw, dh = 0.0, 0.0\n        new_unpad = (new_shape[1], new_shape[0])\n        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios\n\n    dw /= 2  # divide padding into 2 sides\n    dh /= 2\n\n    if shape[::-1] != new_unpad:  # resize\n        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)\n    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border\n    return im, ratio, (dw, dh)\n\n\ndef random_perspective(\n        im,\n        targets=(),\n        segments=(),\n        degrees=10,\n        translate=.1,\n        scale=.1,\n        shear=10,\n        perspective=0.0,\n        border=(0, 0)\n        ):\n    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))\n    # targets = [cls, xyxy]\n\n    height = im.shape[0] + border[0] * 2  # shape(h,w,c)\n    width = im.shape[1] + border[1] * 2\n\n    # Center\n    C = np.eye(3)\n    C[0, 2] = -im.shape[1] / 2  # x translation (pixels)\n    C[1, 2] = -im.shape[0] / 2  # y translation (pixels)\n\n    # Perspective\n    P = np.eye(3)\n    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)\n    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)\n\n    # Rotation and Scale\n    R = np.eye(3)\n    a = random.uniform(-degrees, degrees)\n    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations\n    s = random.uniform(1 - scale, 1 + scale)\n    # s = 2 ** random.uniform(-scale, scale)\n    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n\n    # Shear\n    S = np.eye(3)\n    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)\n    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)\n\n    # Translation\n    T = np.eye(3)\n    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)\n    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)\n\n    # Combined rotation matrix\n    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT\n    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed\n        if perspective:\n            im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))\n        else:  # 
affine\n            im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))\n\n    # Visualize\n    # import matplotlib.pyplot as plt\n    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()\n    # ax[0].imshow(im[:, :, ::-1])  # base\n    # ax[1].imshow(im2[:, :, ::-1])  # warped\n\n    # Transform label coordinates\n    n = len(targets)\n    if n:\n        use_segments = any(x.any() for x in segments)\n        new = np.zeros((n, 4))\n        if use_segments:  # warp segments\n            segments = resample_segments(segments)  # upsample\n            for i, segment in enumerate(segments):\n                xy = np.ones((len(segment), 3))\n                xy[:, :2] = segment\n                xy = xy @ M.T  # transform\n                xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine\n\n                # clip\n                new[i] = segment2box(xy, width, height)\n\n        else:  # warp boxes\n            xy = np.ones((n * 4, 3))\n            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1\n            xy = xy @ M.T  # transform\n            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)  # perspective rescale or affine\n\n            # create new boxes\n            x = xy[:, [0, 2, 4, 6]]\n            y = xy[:, [1, 3, 5, 7]]\n            new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n            # clip\n            new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)\n            new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)\n\n        # filter candidates\n        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)\n        targets = targets[i]\n        targets[:, 1:5] = new[i]\n\n    return im, targets\n\n\ndef copy_paste(im, labels, segments, p=0.5):\n    # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)\n    n = len(segments)\n    if p and n:\n        h, w, c = im.shape  # height, width, channels\n        im_new = np.zeros(im.shape, np.uint8)\n        for j in random.sample(range(n), k=round(p * n)):\n            l, s = labels[j], segments[j]\n            box = w - l[3], l[2], w - l[1], l[4]\n            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area\n            if (ioa < 0.30).all():  # allow 30% obscuration of existing labels\n                labels = np.concatenate((labels, [[l[0], *box]]), 0)\n                segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))\n                cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)\n\n        result = cv2.bitwise_and(src1=im, src2=im_new)\n        result = cv2.flip(result, 1)  # augment segments (flip left-right)\n        i = result > 0  # pixels to replace\n        # i[:, :] = result.max(2).reshape(h, w, 1)  # act over ch\n        im[i] = result[i]  # cv2.imwrite('debug.jpg', im)  # debug\n\n    return im, labels, segments\n\n\ndef cutout(im, labels, p=0.5):\n    # Applies image cutout augmentation https://arxiv.org/abs/1708.04552\n    if random.random() < p:\n        h, w = im.shape[:2]\n        scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction\n        for s in scales:\n            mask_h = random.randint(1, int(h * s))  # create random masks\n            mask_w = random.randint(1, int(w * s))\n\n            # box\n            xmin = max(0, 
random.randint(0, w) - mask_w // 2)\n            ymin = max(0, random.randint(0, h) - mask_h // 2)\n            xmax = min(w, xmin + mask_w)\n            ymax = min(h, ymin + mask_h)\n\n            # apply random color mask\n            im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]\n\n            # return unobscured labels\n            if len(labels) and s > 0.03:\n                box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)\n                ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area\n                labels = labels[ioa < 0.60]  # remove >60% obscured labels\n\n    return labels\n\n\ndef mixup(im, labels, im2, labels2):\n    # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf\n    r = np.random.beta(32.0, 32.0)  # mixup ratio, alpha=beta=32.0\n    im = (im * r + im2 * (1 - r)).astype(np.uint8)\n    labels = np.concatenate((labels, labels2), 0)\n    return im, labels\n\n\ndef box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)\n    # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio\n    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]\n    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]\n    ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio\n    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)  # candidates\n"
  },
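  {
    "path": "module/detect/examples/letterbox_sketch.py",
    "content": "# NOTE: hypothetical example file added for illustration; not part of the upstream YOLOv5/TarDAL sources.\n# What letterbox() in utils/augmentations.py returns: the image is resized with its aspect\n# ratio preserved, then padded with gray borders. With auto=True it pads only up to the next\n# multiple of the model stride instead of to the full square, saving computation.\nimport numpy as np\n\nfrom utils.augmentations import letterbox  # run from module/detect/ so utils is importable\n\nim = np.zeros((480, 640, 3), dtype=np.uint8)  # height, width, channels\n\nout, ratio, (dw, dh) = letterbox(im, 640, auto=True, stride=32)\nprint(out.shape, ratio, (dw, dh))  # (480, 640, 3) (1.0, 1.0) (0.0, 0.0): 480 is already stride-aligned\n\nout, ratio, (dw, dh) = letterbox(im, 640, auto=False)  # pad to the full 640x640 square\nprint(out.shape)  # (640, 640, 3), with 80 px of gray padding above and below\n"
  },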
  {
    "path": "module/detect/utils/autoanchor.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nAutoAnchor utils\n\"\"\"\n\nimport random\n\nimport numpy as np\nimport torch\nimport yaml\nfrom tqdm import tqdm\n\nfrom utils.general import LOGGER, colorstr, emojis\n\nPREFIX = colorstr('AutoAnchor: ')\n\n\ndef check_anchor_order(m):\n    # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary\n    a = m.anchors.prod(-1).mean(-1).view(-1)  # mean anchor area per output layer\n    da = a[-1] - a[0]  # delta a\n    ds = m.stride[-1] - m.stride[0]  # delta s\n    if da and (da.sign() != ds.sign()):  # same order\n        LOGGER.info(f'{PREFIX}Reversing anchor order')\n        m.anchors[:] = m.anchors.flip(0)\n\n\ndef check_anchors(dataset, model, thr=4.0, imgsz=640):\n    # Check anchor fit to data, recompute if necessary\n    m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect()\n    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)\n    scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale\n    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh\n\n    def metric(k):  # compute metric\n        r = wh[:, None] / k[None]\n        x = torch.min(r, 1 / r).min(2)[0]  # ratio metric\n        best = x.max(1)[0]  # best_x\n        aat = (x > 1 / thr).float().sum(1).mean()  # anchors above threshold\n        bpr = (best > 1 / thr).float().mean()  # best possible recall\n        return bpr, aat\n\n    stride = m.stride.to(m.anchors.device).view(-1, 1, 1)  # model strides\n    anchors = m.anchors.clone() * stride  # current anchors\n    bpr, aat = metric(anchors.cpu().view(-1, 2))\n    s = f'\\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). 
'\n    if bpr > 0.98:  # threshold to recompute\n        LOGGER.info(emojis(f'{s}Current anchors are a good fit to dataset ✅'))\n    else:\n        LOGGER.info(emojis(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...'))\n        na = m.anchors.numel() // 2  # number of anchors\n        try:\n            anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)\n        except Exception as e:\n            LOGGER.info(f'{PREFIX}ERROR: {e}')\n        new_bpr = metric(anchors)[0]\n        if new_bpr > bpr:  # replace anchors\n            anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)\n            m.anchors[:] = anchors.clone().view_as(m.anchors)\n            check_anchor_order(m)  # must be in pixel-space (not grid-space)\n            m.anchors /= stride\n            s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)'\n        else:\n            s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)'\n        LOGGER.info(emojis(s))\n\n\ndef kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):\n    \"\"\" Creates kmeans-evolved anchors from training dataset\n\n        Arguments:\n            dataset: path to data.yaml, or a loaded dataset\n            n: number of anchors\n            img_size: image size used for training\n            thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0\n            gen: generations to evolve anchors using genetic algorithm\n            verbose: print all results\n\n        Return:\n            k: kmeans evolved anchors\n\n        Usage:\n            from utils.autoanchor import *; _ = kmean_anchors()\n    \"\"\"\n    from scipy.cluster.vq import kmeans\n\n    npr = np.random\n    thr = 1 / thr\n\n    def metric(k, wh):  # compute metrics\n        r = wh[:, None] / k[None]\n        x = torch.min(r, 1 / r).min(2)[0]  # ratio metric\n        # x = wh_iou(wh, torch.tensor(k))  # iou metric\n        return x, x.max(1)[0]  # x, best_x\n\n    def anchor_fitness(k):  # mutation fitness\n        _, best = metric(torch.tensor(k, dtype=torch.float32), wh)\n        return (best * (best > thr).float()).mean()  # fitness\n\n    def print_results(k, verbose=True):\n        k = k[np.argsort(k.prod(1))]  # sort small to large\n        x, best = metric(k, wh0)\n        bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr\n        s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\\n' \\\n            f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \\\n            f'past_thr={x[x > thr].mean():.3f}-mean: '\n        for x in k:\n            s += '%i,%i, ' % (round(x[0]), round(x[1]))\n        if verbose:\n            LOGGER.info(s[:-2])\n        return k\n\n    if isinstance(dataset, str):  # *.yaml file\n        with open(dataset, errors='ignore') as f:\n            data_dict = yaml.safe_load(f)  # model dict\n        from utils.dataloaders import LoadImagesAndLabels\n        dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)\n\n    # Get label wh\n    shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)\n    wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh\n\n    # Filter\n    i = (wh0 < 3.0).any(1).sum()\n    if i:\n    
    LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size')\n    wh = wh0[(wh0 >= 2.0).any(1)]  # filter > 2 pixels\n    # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1)  # multiply by random scale 0-1\n\n    # Kmeans init\n    try:\n        LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...')\n        assert n <= len(wh)  # apply overdetermined constraint\n        s = wh.std(0)  # sigmas for whitening\n        k = kmeans(wh / s, n, iter=30)[0] * s  # points\n        assert n == len(k)  # kmeans may return fewer points than requested if wh is insufficient or too similar\n    except Exception:\n        LOGGER.warning(f'{PREFIX}WARNING: switching strategies from kmeans to random init')\n        k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size  # random init\n    wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0))\n    k = print_results(k, verbose=False)\n\n    # Plot\n    # k, d = [None] * 20, [None] * 20\n    # for i in tqdm(range(1, 21)):\n    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance\n    # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)\n    # ax = ax.ravel()\n    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')\n    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh\n    # ax[0].hist(wh[wh[:, 0]<100, 0],400)\n    # ax[1].hist(wh[wh[:, 1]<100, 1],400)\n    # fig.savefig('wh.png', dpi=200)\n\n    # Evolve\n    f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, generations, mutation prob, sigma\n    pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar\n    for _ in pbar:\n        v = np.ones(sh)\n        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)\n            v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)\n        kg = (k.copy() * v).clip(min=2.0)\n        fg = anchor_fitness(kg)\n        if fg > f:\n            f, k = fg, kg.copy()\n            pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'\n            if verbose:\n                print_results(k, verbose)\n\n    return print_results(k)\n"
  },
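  {
    "path": "module/detect/examples/bpr_metric_sketch.py",
    "content": "# NOTE: hypothetical example file added for illustration; not part of the upstream YOLOv5/TarDAL sources.\n# The ratio metric behind Best Possible Recall (BPR) in utils/autoanchor.py: a label is\n# recallable if some anchor matches both its width and height within a factor of thr\n# (4.0 by default), i.e. min(w/aw, aw/w, h/ah, ah/h) > 1 / thr.\nimport torch\n\nthr = 4.0\nwh = torch.tensor([[30., 25.], [120., 90.], [5., 300.]])  # label widths/heights (pixels)\nk = torch.tensor([[10., 13.], [33., 23.], [116., 90.]])   # anchor widths/heights (pixels)\n\nr = wh[:, None] / k[None]          # (labels, anchors, 2) size ratios\nx = torch.min(r, 1 / r).min(2)[0]  # worst of the width and height ratios per pair\nbest = x.max(1)[0]                 # best-matching anchor per label\nbpr = (best > 1 / thr).float().mean()\nprint(best, bpr)  # the extreme 5x300 box fits no anchor, so bpr = 2/3 here\n"
  },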
  {
    "path": "module/detect/utils/autobatch.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nAuto-batch utils\n\"\"\"\n\nfrom copy import deepcopy\n\nimport numpy as np\nimport torch\n\nfrom utils.general import LOGGER, colorstr, emojis\nfrom utils.torch_utils import profile\n\n\ndef check_train_batch_size(model, imgsz=640, amp=True):\n    # Check YOLOv5 training batch size\n    with torch.cuda.amp.autocast(amp):\n        return autobatch(deepcopy(model).train(), imgsz)  # compute optimal batch size\n\n\ndef autobatch(model, imgsz=640, fraction=0.9, batch_size=16):\n    # Automatically estimate best batch size to use `fraction` of available CUDA memory\n    # Usage:\n    #     import torch\n    #     from utils.autobatch import autobatch\n    #     model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)\n    #     print(autobatch(model))\n\n    # Check device\n    prefix = colorstr('AutoBatch: ')\n    LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')\n    device = next(model.parameters()).device  # get model device\n    if device.type == 'cpu':\n        LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')\n        return batch_size\n\n    # Inspect CUDA memory\n    gb = 1 << 30  # bytes to GiB (1024 ** 3)\n    d = str(device).upper()  # 'CUDA:0'\n    properties = torch.cuda.get_device_properties(device)  # device properties\n    t = properties.total_memory / gb  # GiB total\n    r = torch.cuda.memory_reserved(device) / gb  # GiB reserved\n    a = torch.cuda.memory_allocated(device) / gb  # GiB allocated\n    f = t - (r + a)  # GiB free\n    LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')\n\n    # Profile batch sizes\n    batch_sizes = [1, 2, 4, 8, 16]\n    try:\n        img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes]\n        results = profile(img, model, n=3, device=device)\n    except Exception as e:\n        LOGGER.warning(f'{prefix}{e}')\n\n    # Fit a solution\n    y = [x[2] for x in results if x]  # memory [2]\n    p = np.polyfit(batch_sizes[:len(y)], y, deg=1)  # first degree polynomial fit\n    b = int((f * fraction - p[1]) / p[0])  # y intercept (optimal batch size)\n    if None in results:  # some sizes failed\n        i = results.index(None)  # first fail index\n        if b >= batch_sizes[i]:  # y intercept above failure point\n            b = batch_sizes[max(i - 1, 0)]  # select prior safe point\n\n    fraction = np.polyval(p, b) / t  # actual fraction predicted\n    LOGGER.info(emojis(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅'))\n    return b\n"
  },
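  {
    "path": "module/detect/examples/autobatch_fit_sketch.py",
    "content": "# NOTE: hypothetical example file added for illustration (with made-up numbers); not part of\n# the upstream YOLOv5/TarDAL sources. The memory model inside autobatch(): measured CUDA usage\n# is roughly linear in batch size, so a degree-1 polyfit yields GiB-per-image (slope) and fixed\n# overhead (intercept), and the chosen batch size solves y = fraction * free_memory.\nimport numpy as np\n\nbatch_sizes = [1, 2, 4, 8, 16]\nmem = [1.1, 1.5, 2.3, 3.9, 7.1]  # hypothetical GiB used at each batch size\nf, fraction = 10.0, 0.9          # hypothetical free GiB and target utilization\n\np = np.polyfit(batch_sizes, mem, deg=1)  # p[0] = slope, p[1] = intercept\nb = int((f * fraction - p[1]) / p[0])    # largest batch size within the memory budget\nprint(p, b)  # slope 0.4 GiB/image, overhead 0.7 GiB -> b = 20\n"
  },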
  {
    "path": "module/detect/utils/aws/__init__.py",
    "content": ""
  },
  {
    "path": "module/detect/utils/aws/mime.sh",
    "content": "# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/\n# This script will run on every instance restart, not only on first start\n# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---\n\nContent-Type: multipart/mixed; boundary=\"//\"\nMIME-Version: 1.0\n\n--//\nContent-Type: text/cloud-config; charset=\"us-ascii\"\nMIME-Version: 1.0\nContent-Transfer-Encoding: 7bit\nContent-Disposition: attachment; filename=\"cloud-config.txt\"\n\n#cloud-config\ncloud_final_modules:\n- [scripts-user, always]\n\n--//\nContent-Type: text/x-shellscript; charset=\"us-ascii\"\nMIME-Version: 1.0\nContent-Transfer-Encoding: 7bit\nContent-Disposition: attachment; filename=\"userdata.txt\"\n\n#!/bin/bash\n# --- paste contents of userdata.sh here ---\n--//\n"
  },
  {
    "path": "module/detect/utils/aws/resume.py",
    "content": "# Resume all interrupted trainings in yolov5/ dir including DDP trainings\n# Usage: $ python utils/aws/resume.py\n\nimport os\nimport sys\nfrom pathlib import Path\n\nimport torch\nimport yaml\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[2]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\n\nport = 0  # --master_port\npath = Path('').resolve()\nfor last in path.rglob('*/**/last.pt'):\n    ckpt = torch.load(last)\n    if ckpt['optimizer'] is None:\n        continue\n\n    # Load opt.yaml\n    with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:\n        opt = yaml.safe_load(f)\n\n    # Get device count\n    d = opt['device'].split(',')  # devices\n    nd = len(d)  # number of devices\n    ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1)  # distributed data parallel\n\n    if ddp:  # multi-GPU\n        port += 1\n        cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} trainfd.py --resume {last}'\n    else:  # single-GPU\n        cmd = f'python trainfd.py --resume {last}'\n\n    cmd += ' > /dev/null 2>&1 &'  # redirect output to dev/null and run in daemon thread\n    print(cmd)\n    os.system(cmd)\n"
  },
  {
    "path": "module/detect/utils/aws/userdata.sh",
    "content": "#!/bin/bash\n# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html\n# This script will run only once on first instance start (for a re-start script see mime.sh)\n# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir\n# Use >300 GB SSD\n\ncd home/ubuntu\nif [ ! -d yolov5 ]; then\n  echo \"Running first-time script.\" # install dependencies, download COCO, pull Docker\n  git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5\n  cd yolov5\n  bash data/scripts/get_coco.sh && echo \"COCO done.\" &\n  sudo docker pull ultralytics/yolov5:latest && echo \"Docker done.\" &\n  python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo \"Requirements done.\" &\n  wait && echo \"All tasks done.\" # finish background tasks\nelse\n  echo \"Running re-start script.\" # resume interrupted runs\n  i=0\n  list=$(sudo docker ps -qa) # container list i.e. $'one\\ntwo\\nthree\\nfour'\n  while IFS= read -r id; do\n    ((i++))\n    echo \"restarting container $i: $id\"\n    sudo docker start $id\n    # sudo docker exec -it $id python train.py --resume # single-GPU\n    sudo docker exec -d $id python utils/aws/resume.py # multi-scenario\n  done <<<\"$list\"\nfi\n"
  },
  {
    "path": "module/detect/utils/benchmarks.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nRun YOLOv5 benchmarks on all supported export formats\n\nFormat                      | `export.py --include`         | Model\n---                         | ---                           | ---\nPyTorch                     | -                             | yolov5s.pt\nTorchScript                 | `torchscript`                 | yolov5s.torchscript\nONNX                        | `onnx`                        | yolov5s.onnx\nOpenVINO                    | `openvino`                    | yolov5s_openvino_model/\nTensorRT                    | `engine`                      | yolov5s.engine\nCoreML                      | `coreml`                      | yolov5s.mlmodel\nTensorFlow SavedModel       | `saved_model`                 | yolov5s_saved_model/\nTensorFlow GraphDef         | `pb`                          | yolov5s.pb\nTensorFlow Lite             | `tflite`                      | yolov5s.tflite\nTensorFlow Edge TPU         | `edgetpu`                     | yolov5s_edgetpu.tflite\nTensorFlow.js               | `tfjs`                        | yolov5s_web_model/\n\nRequirements:\n    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU\n    $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow  # GPU\n    $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com  # TensorRT\n\nUsage:\n    $ python utils/benchmarks.py --weights yolov5s.pt --img 640\n\"\"\"\n\nimport argparse\nimport platform\nimport sys\nimport time\nfrom pathlib import Path\n\nimport pandas as pd\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\n# ROOT = ROOT.relative_to(Path.cwd())  # relative\n\nimport export\nimport val\nfrom utils import notebook_init\nfrom utils.general import LOGGER, check_yaml, file_size, print_args\nfrom utils.torch_utils import select_device\n\n\ndef run(\n        weights=ROOT / 'yolov5s.pt',  # weights path\n        imgsz=640,  # inference size (pixels)\n        batch_size=1,  # batch size\n        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path\n        device='',  # cuda device, i.e. 
0 or 0,1,2,3 or cpu\n        half=False,  # use FP16 half-precision inference\n        test=False,  # test exports only\n        pt_only=False,  # test PyTorch only\n        hard_fail=False,  # throw error on benchmark failure\n):\n    y, t = [], time.time()\n    device = select_device(device)\n    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU)\n        try:\n            assert i not in (9, 10), 'inference not supported'  # Edge TPU and TF.js are unsupported\n            assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13'  # CoreML\n            if 'cpu' in device.type:\n                assert cpu, 'inference not supported on CPU'\n            if 'cuda' in device.type:\n                assert gpu, 'inference not supported on GPU'\n\n            # Export\n            if f == '-':\n                w = weights  # PyTorch format\n            else:\n                w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # all others\n            assert suffix in str(w), 'export failed'\n\n            # Validate\n            result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half)\n            metrics = result[0]  # metrics (mp, mr, map50, map, *losses(box, obj, cls))\n            speeds = result[2]  # times (preprocess, inference, postprocess)\n            y.append([name, round(file_size(w), 1), round(metrics[3], 4), round(speeds[1], 2)])  # MB, mAP, t_inference\n        except Exception as e:\n            if hard_fail:\n                assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'\n            LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}')\n            y.append([name, None, None, None])  # mAP, t_inference\n        if pt_only and i == 0:\n            break  # break after PyTorch\n\n    # Print results\n    LOGGER.info('\\n')\n    parse_opt()\n    notebook_init()  # print system info\n    c = ['Format', 'Size (MB)', 'mAP@0.5:0.95', 'Inference time (ms)'] if map else ['Format', 'Export', '', '']\n    py = pd.DataFrame(y, columns=c)\n    LOGGER.info(f'\\nBenchmarks complete ({time.time() - t:.2f}s)')\n    LOGGER.info(str(py if map else py.iloc[:, :2]))\n    return py\n\n\ndef test(\n        weights=ROOT / 'yolov5s.pt',  # weights path\n        imgsz=640,  # inference size (pixels)\n        batch_size=1,  # batch size\n        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path\n        device='',  # cuda device, i.e. 
0 or 0,1,2,3 or cpu\n        half=False,  # use FP16 half-precision inference\n        test=False,  # test exports only\n        pt_only=False,  # test PyTorch only\n        hard_fail=False,  # throw error on benchmark failure\n):\n    y, t = [], time.time()\n    device = select_device(device)\n    for i, (name, f, suffix, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, gpu-capable)\n        try:\n            w = weights if f == '-' else \\\n                export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # weights\n            assert suffix in str(w), 'export failed'\n            y.append([name, True])\n        except Exception:\n            y.append([name, False])  # mAP, t_inference\n\n    # Print results\n    LOGGER.info('\\n')\n    parse_opt()\n    notebook_init()  # print system info\n    py = pd.DataFrame(y, columns=['Format', 'Export'])\n    LOGGER.info(f'\\nExports complete ({time.time() - t:.2f}s)')\n    LOGGER.info(str(py))\n    return py\n\n\ndef parse_opt():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')\n    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')\n    parser.add_argument('--batch-size', type=int, default=1, help='batch size')\n    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')\n    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')\n    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')\n    parser.add_argument('--test', action='store_true', help='test exports only')\n    parser.add_argument('--pt-only', action='store_true', help='test PyTorch only')\n    parser.add_argument('--hard-fail', action='store_true', help='throw error on benchmark failure')\n    opt = parser.parse_args()\n    opt.data = check_yaml(opt.data)  # check YAML\n    print_args(vars(opt))\n    return opt\n\n\ndef main(opt):\n    test(**vars(opt)) if opt.test else run(**vars(opt))\n\n\nif __name__ == \"__main__\":\n    opt = parse_opt()\n    main(opt)\n"
  },
  {
    "path": "module/detect/utils/callbacks.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nCallback utils\n\"\"\"\n\n\nclass Callbacks:\n    \"\"\"\"\n    Handles all registered callbacks for YOLOv5 Hooks\n    \"\"\"\n\n    def __init__(self):\n        # Define the available callbacks\n        self._callbacks = {\n            'on_pretrain_routine_start': [],\n            'on_pretrain_routine_end': [],\n            'on_train_start': [],\n            'on_train_epoch_start': [],\n            'on_train_batch_start': [],\n            'optimizer_step': [],\n            'on_before_zero_grad': [],\n            'on_train_batch_end': [],\n            'on_train_epoch_end': [],\n            'on_val_start': [],\n            'on_val_batch_start': [],\n            'on_val_image_end': [],\n            'on_val_batch_end': [],\n            'on_val_end': [],\n            'on_fit_epoch_end': [],  # fit = train + eval\n            'on_model_save': [],\n            'on_train_end': [],\n            'on_params_update': [],\n            'teardown': [], }\n        self.stop_training = False  # set True to interrupt training\n\n    def register_action(self, hook, name='', callback=None):\n        \"\"\"\n        Register a new action to a callback hook\n\n        Args:\n            hook: The callback hook name to register the action to\n            name: The name of the action for later reference\n            callback: The callback to fire\n        \"\"\"\n        assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n        assert callable(callback), f\"callback '{callback}' is not callable\"\n        self._callbacks[hook].append({'name': name, 'callback': callback})\n\n    def get_registered_actions(self, hook=None):\n        \"\"\"\"\n        Returns all the registered actions by callback hook\n\n        Args:\n            hook: The name of the hook to check, defaults to all\n        \"\"\"\n        return self._callbacks[hook] if hook else self._callbacks\n\n    def run(self, hook, *args, **kwargs):\n        \"\"\"\n        Loop through the registered actions and fire all callbacks\n\n        Args:\n            hook: The name of the hook to check, defaults to all\n            args: Arguments to receive from YOLOv5\n            kwargs: Keyword Arguments to receive from YOLOv5\n        \"\"\"\n\n        assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n\n        for logger in self._callbacks[hook]:\n            logger['callback'](*args, **kwargs)\n"
  },
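  {
    "path": "module/detect/examples/callbacks_usage_sketch.py",
    "content": "# NOTE: hypothetical example file added for illustration; not part of the upstream YOLOv5/TarDAL sources.\n# How utils.callbacks.Callbacks is used: loggers register an action on a named hook once,\n# and the training loop fires every action on that hook with run().\nfrom utils.callbacks import Callbacks  # run from module/detect/ so utils is importable\n\n\ndef on_train_start(epochs):\n    print(f'training for {epochs} epochs')\n\n\ncallbacks = Callbacks()\ncallbacks.register_action('on_train_start', name='demo_logger', callback=on_train_start)\ncallbacks.run('on_train_start', 300)  # prints: training for 300 epochs\n"
  },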
  {
    "path": "module/detect/utils/dataloaders.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nDataloaders and dataset utils\n\"\"\"\n\nimport glob\nimport hashlib\nimport json\nimport math\nimport os\nimport random\nimport shutil\nimport time\nfrom itertools import repeat\nfrom multiprocessing.pool import Pool, ThreadPool\nfrom pathlib import Path\nfrom threading import Thread\nfrom urllib.parse import urlparse\nfrom zipfile import ZipFile\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport yaml\nfrom PIL import ExifTags, Image, ImageOps\nfrom torch.utils.data import DataLoader, Dataset, dataloader, distributed\nfrom tqdm import tqdm\nfrom utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective\nfrom utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str,\n                           cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn)\nfrom utils.torch_utils import torch_distributed_zero_first\n\n# Parameters\nHELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'\nIMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp'  # include image suffixes\nVID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv'  # include video suffixes\nBAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}'  # tqdm bar format\nLOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html\n\n# Get orientation exif tag\nfor orientation in ExifTags.TAGS.keys():\n    if ExifTags.TAGS[orientation] == 'Orientation':\n        break\n\n\ndef get_hash(paths):\n    # Returns a single hash value of a list of paths (files or dirs)\n    size = sum(os.path.getsize(p) for p in paths if os.path.exists(p))  # sizes\n    h = hashlib.md5(str(size).encode())  # hash sizes\n    h.update(''.join(paths).encode())  # hash paths\n    return h.hexdigest()  # return hash\n\n\ndef exif_size(img):\n    # Returns exif-corrected PIL size\n    s = img.size  # (width, height)\n    try:\n        rotation = dict(img._getexif().items())[orientation]\n        if rotation in [6, 8]:  # rotation 270 or 90\n            s = (s[1], s[0])\n    except Exception:\n        pass\n\n    return s\n\n\ndef exif_transpose(image):\n    \"\"\"\n    Transpose a PIL image accordingly if it has an EXIF Orientation tag.\n    Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()\n\n    :param image: The image to transpose.\n    :return: An image.\n    \"\"\"\n    exif = image.getexif()\n    orientation = exif.get(0x0112, 1)  # default 1\n    if orientation > 1:\n        method = {\n            2: Image.FLIP_LEFT_RIGHT,\n            3: Image.ROTATE_180,\n            4: Image.FLIP_TOP_BOTTOM,\n            5: Image.TRANSPOSE,\n            6: Image.ROTATE_270,\n            7: Image.TRANSVERSE,\n            8: Image.ROTATE_90, }.get(orientation)\n        if method is not None:\n            image = image.transpose(method)\n            del exif[0x0112]\n            image.info[\"exif\"] = exif.tobytes()\n    return image\n\n\ndef seed_worker(worker_id):\n    # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader\n    worker_seed = torch.initial_seed() % 2 ** 32\n    np.random.seed(worker_seed)\n    random.seed(worker_seed)\n\n\ndef create_dataloader(\n        path,\n        imgsz,\n        batch_size,\n        stride,\n        single_cls=False,\n    
    hyp=None,\n        augment=False,\n        cache=False,\n        pad=0.0,\n        rect=False,\n        rank=-1,\n        workers=8,\n        image_weights=False,\n        quad=False,\n        prefix='',\n        shuffle=False\n        ):\n    if rect and shuffle:\n        LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False')\n        shuffle = False\n    with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP\n        dataset = LoadImagesAndLabels(\n            path,\n            imgsz,\n            batch_size,\n            augment=augment,  # augmentation\n            hyp=hyp,  # hyperparameters\n            rect=rect,  # rectangular batches\n            cache_images=cache,\n            single_cls=single_cls,\n            stride=int(stride),\n            pad=pad,\n            image_weights=image_weights,\n            prefix=prefix\n        )\n\n    batch_size = min(batch_size, len(dataset))\n    nd = torch.cuda.device_count()  # number of CUDA devices\n    nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])  # number of workers\n    sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)\n    loader = DataLoader if image_weights else InfiniteDataLoader  # only DataLoader allows for attribute updates\n    generator = torch.Generator()\n    generator.manual_seed(0)\n    return loader(\n        dataset,\n        batch_size=batch_size,\n        shuffle=shuffle and sampler is None,\n        num_workers=nw,\n        sampler=sampler,\n        pin_memory=True,\n        collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn,\n        worker_init_fn=seed_worker,\n        generator=generator\n        ), dataset\n\n\nclass InfiniteDataLoader(dataloader.DataLoader):\n    \"\"\" Dataloader that reuses workers\n\n    Uses same syntax as vanilla DataLoader\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))\n        self.iterator = super().__iter__()\n\n    def __len__(self):\n        return len(self.batch_sampler.sampler)\n\n    def __iter__(self):\n        for _ in range(len(self)):\n            yield next(self.iterator)\n\n\nclass _RepeatSampler:\n    \"\"\" Sampler that repeats forever\n\n    Args:\n        sampler (Sampler)\n    \"\"\"\n\n    def __init__(self, sampler):\n        self.sampler = sampler\n\n    def __iter__(self):\n        while True:\n            yield from iter(self.sampler)\n\n\nclass LoadImages:\n    # YOLOv5 image/video dataloader, i.e. 
`python detect.py --source image.jpg/vid.mp4`\n    def __init__(self, path, img_size=640, stride=32, auto=True):\n        files = []\n        for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:\n            p = str(Path(p).resolve())\n            if '*' in p:\n                files.extend(sorted(glob.glob(p, recursive=True)))  # glob\n            elif os.path.isdir(p):\n                files.extend(sorted(glob.glob(os.path.join(p, '*.*'))))  # dir\n            elif os.path.isfile(p):\n                files.append(p)  # files\n            else:\n                raise FileNotFoundError(f'{p} does not exist')\n\n        images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n        videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n        ni, nv = len(images), len(videos)\n\n        self.img_size = img_size\n        self.stride = stride\n        self.files = images + videos\n        self.nf = ni + nv  # number of files\n        self.video_flag = [False] * ni + [True] * nv\n        self.mode = 'image'\n        self.auto = auto\n        if any(videos):\n            self.new_video(videos[0])  # new video\n        else:\n            self.cap = None\n        assert self.nf > 0, f'No images or videos found in {p}. ' \\\n                            f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n    def __iter__(self):\n        self.count = 0\n        return self\n\n    def __next__(self):\n        if self.count == self.nf:\n            raise StopIteration\n        path = self.files[self.count]\n\n        if self.video_flag[self.count]:\n            # Read video\n            self.mode = 'video'\n            ret_val, img0 = self.cap.read()\n            while not ret_val:\n                self.count += 1\n                self.cap.release()\n                if self.count == self.nf:  # last video\n                    raise StopIteration\n                path = self.files[self.count]\n                self.new_video(path)\n                ret_val, img0 = self.cap.read()\n\n            self.frame += 1\n            s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '\n\n        else:\n            # Read image\n            self.count += 1\n            img0 = cv2.imread(path)  # BGR\n            assert img0 is not None, f'Image Not Found {path}'\n            s = f'image {self.count}/{self.nf} {path}: '\n\n        # Padded resize\n        img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]\n\n        # Convert\n        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB\n        img = np.ascontiguousarray(img)\n\n        return path, img, img0, self.cap, s\n\n    def new_video(self, path):\n        self.frame = 0\n        self.cap = cv2.VideoCapture(path)\n        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n    def __len__(self):\n        return self.nf  # number of files\n\n\nclass LoadWebcam:  # for inference\n    # YOLOv5 local webcam dataloader, i.e. 
`python detect.py --source 0`\n    def __init__(self, pipe='0', img_size=640, stride=32):\n        self.img_size = img_size\n        self.stride = stride\n        self.pipe = eval(pipe) if pipe.isnumeric() else pipe\n        self.cap = cv2.VideoCapture(self.pipe)  # video capture object\n        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size\n\n    def __iter__(self):\n        self.count = -1\n        return self\n\n    def __next__(self):\n        self.count += 1\n        if cv2.waitKey(1) == ord('q'):  # q to quit\n            self.cap.release()\n            cv2.destroyAllWindows()\n            raise StopIteration\n\n        # Read frame\n        ret_val, img0 = self.cap.read()\n        img0 = cv2.flip(img0, 1)  # flip left-right\n\n        # Print\n        assert ret_val, f'Camera Error {self.pipe}'\n        img_path = 'webcam.jpg'\n        s = f'webcam {self.count}: '\n\n        # Padded resize\n        img = letterbox(img0, self.img_size, stride=self.stride)[0]\n\n        # Convert\n        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB\n        img = np.ascontiguousarray(img)\n\n        return img_path, img, img0, None, s\n\n    def __len__(self):\n        return 0\n\n\nclass LoadStreams:\n    # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP streams`\n    def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):\n        self.mode = 'stream'\n        self.img_size = img_size\n        self.stride = stride\n\n        if os.path.isfile(sources):\n            with open(sources) as f:\n                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]\n        else:\n            sources = [sources]\n\n        n = len(sources)\n        self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n        self.sources = [clean_str(x) for x in sources]  # clean source names for later\n        self.auto = auto\n        for i, s in enumerate(sources):  # index, source\n            # Start thread to read frames from video stream\n            st = f'{i + 1}/{n}: {s}... '\n            if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'):  # if source is YouTube video\n                check_requirements(('pafy', 'youtube_dl==2020.12.2'))\n                import pafy\n                s = pafy.new(s).getbest(preftype=\"mp4\").url  # YouTube URL\n            s = eval(s) if s.isnumeric() else s  # i.e. s = '0' local webcam\n            if s == 0:\n                assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.'\n                assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. 
Rerun command in a local environment.'\n            cap = cv2.VideoCapture(s)\n            assert cap.isOpened(), f'{st}Failed to open {s}'\n            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n            fps = cap.get(cv2.CAP_PROP_FPS)  # warning: may return 0 or nan\n            self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf')  # infinite stream fallback\n            self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30  # 30 FPS fallback\n\n            _, self.imgs[i] = cap.read()  # guarantee first frame\n            self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n            LOGGER.info(f\"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n            self.threads[i].start()\n        LOGGER.info('')  # newline\n\n        # check for common shapes\n        s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])\n        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal\n        if not self.rect:\n            LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.')\n\n    def update(self, i, cap, stream):\n        # Read stream `i` frames in daemon thread\n        n, f, read = 0, self.frames[i], 1  # frame number, frame array, inference every 'read' frame\n        while cap.isOpened() and n < f:\n            n += 1\n            # _, self.imgs[index] = cap.read()\n            cap.grab()\n            if n % read == 0:\n                success, im = cap.retrieve()\n                if success:\n                    self.imgs[i] = im\n                else:\n                    LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')\n                    self.imgs[i] = np.zeros_like(self.imgs[i])\n                    cap.open(stream)  # re-open stream if signal was lost\n            time.sleep(0.0)  # wait time\n\n    def __iter__(self):\n        self.count = -1\n        return self\n\n    def __next__(self):\n        self.count += 1\n        if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'):  # q to quit\n            cv2.destroyAllWindows()\n            raise StopIteration\n\n        # Letterbox\n        img0 = self.imgs.copy()\n        img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]\n\n        # Stack\n        img = np.stack(img, 0)\n\n        # Convert\n        img = img[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW\n        img = np.ascontiguousarray(img)\n\n        return self.sources, img, img0, None, ''\n\n    def __len__(self):\n        return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years\n\n\ndef img2label_paths(img_paths):\n    # Define label paths as a function of image paths\n    sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}'  # /images/, /labels/ substrings\n    return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]\n\n\nclass LoadImagesAndLabels(Dataset):\n    # YOLOv5 train_loader/val_loader, loads images and labels for training and validation\n    cache_version = 0.6  # dataset labels *.cache version\n    rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4]\n\n    def __init__(\n            self,\n            path,\n            
img_size=640,\n            batch_size=16,\n            augment=False,\n            hyp=None,\n            rect=False,\n            image_weights=False,\n            cache_images=False,\n            single_cls=False,\n            stride=32,\n            pad=0.0,\n            prefix=''\n            ):\n        self.img_size = img_size\n        self.augment = augment\n        self.hyp = hyp\n        self.image_weights = image_weights\n        self.rect = False if image_weights else rect\n        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)\n        self.mosaic_border = [-img_size // 2, -img_size // 2]\n        self.stride = stride\n        self.path = path\n        self.albumentations = Albumentations() if augment else None\n\n        try:\n            f = []  # image files\n            for p in path if isinstance(path, list) else [path]:\n                p = Path(p)  # os-agnostic\n                if p.is_dir():  # dir\n                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)\n                    # f = list(p.rglob('*.*'))  # pathlib\n                elif p.is_file():  # file\n                    with open(p) as t:\n                        t = t.read().strip().splitlines()\n                        parent = str(p.parent) + os.sep\n                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path\n                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)\n                else:\n                    raise FileNotFoundError(f'{prefix}{p} does not exist')\n            self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)\n            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS])  # pathlib\n            assert self.im_files, f'{prefix}No images found'\n        except Exception as e:\n            raise Exception(f'{prefix}Error loading data from {path}: {e}\\nSee {HELP_URL}')\n\n        # Check cache\n        self.label_files = img2label_paths(self.im_files)  # labels\n        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')\n        try:\n            cache, exists = np.load(cache_path, allow_pickle=True).item(), True  # load dict\n            assert cache['version'] == self.cache_version  # matches current version\n            assert cache['hash'] == get_hash(self.label_files + self.im_files)  # identical hash\n        except Exception:\n            cache, exists = self.cache_labels(cache_path, prefix), False  # run cache ops\n\n        # Display cache\n        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupt, total\n        if exists and LOCAL_RANK in {-1, 0}:\n            d = f\"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt\"\n            tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT)  # display cache results\n            if cache['msgs']:\n                LOGGER.info('\\n'.join(cache['msgs']))  # display warnings\n        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. 
See {HELP_URL}'\n\n        # Read cache\n        [cache.pop(k) for k in ('hash', 'version', 'msgs')]  # remove items\n        labels, shapes, self.segments = zip(*cache.values())\n        self.labels = list(labels)\n        self.shapes = np.array(shapes, dtype=np.float64)\n        self.im_files = list(cache.keys())  # update\n        self.label_files = img2label_paths(cache.keys())  # update\n        n = len(shapes)  # number of images\n        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index\n        nb = bi[-1] + 1  # number of batches\n        self.batch = bi  # batch index of image\n        self.n = n\n        self.indices = range(n)\n\n        # Update labels\n        include_class = []  # filter labels to include only these classes (optional)\n        include_class_array = np.array(include_class).reshape(1, -1)\n        for i, (label, segment) in enumerate(zip(self.labels, self.segments)):\n            if include_class:\n                j = (label[:, 0:1] == include_class_array).any(1)\n                self.labels[i] = label[j]\n                if segment:\n                    self.segments[i] = segment[j]\n            if single_cls:  # single-class training, merge all classes into 0\n                self.labels[i][:, 0] = 0\n                if segment:\n                    self.segments[i][:, 0] = 0\n\n        # Rectangular Training\n        if self.rect:\n            # Sort by aspect ratio\n            s = self.shapes  # wh\n            ar = s[:, 1] / s[:, 0]  # aspect ratio\n            irect = ar.argsort()\n            self.im_files = [self.im_files[i] for i in irect]\n            self.label_files = [self.label_files[i] for i in irect]\n            self.labels = [self.labels[i] for i in irect]\n            self.shapes = s[irect]  # wh\n            ar = ar[irect]\n\n            # Set training image shapes\n            shapes = [[1, 1]] * nb\n            for i in range(nb):\n                ari = ar[bi == i]\n                mini, maxi = ari.min(), ari.max()\n                if maxi < 1:\n                    shapes[i] = [maxi, 1]\n                elif mini > 1:\n                    shapes[i] = [1, 1 / mini]\n\n            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride\n\n        # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources)\n        self.ims = [None] * n\n        self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files]\n        if cache_images:\n            gb = 0  # Gigabytes of cached images\n            self.im_hw0, self.im_hw = [None] * n, [None] * n\n            fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image\n            results = ThreadPool(NUM_THREADS).imap(fcn, range(n))\n            pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT, disable=LOCAL_RANK > 0)\n            for i, x in pbar:\n                if cache_images == 'disk':\n                    gb += self.npy_files[i].stat().st_size\n                else:  # 'ram'\n                    self.ims[i], self.im_hw0[i], self.im_hw[i] = x  # im, hw_orig, hw_resized = load_image(self, i)\n                    gb += self.ims[i].nbytes\n                pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'\n            pbar.close()\n\n    def cache_labels(self, path=Path('./labels.cache'), prefix=''):\n        # Cache dataset labels, check images and read shapes\n        x = {}  # dict\n        nm, nf, ne, nc, msgs = 0, 0, 0, 0, 
[]  # number missing, found, empty, corrupt, messages\n        desc = f\"{prefix}Scanning '{path.parent / path.stem}' images and labels...\"\n        with Pool(NUM_THREADS) as pool:\n            pbar = tqdm(\n                pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))),\n                desc=desc,\n                total=len(self.im_files),\n                bar_format=BAR_FORMAT\n                )\n            for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:\n                nm += nm_f\n                nf += nf_f\n                ne += ne_f\n                nc += nc_f\n                if im_file:\n                    x[im_file] = [lb, shape, segments]\n                if msg:\n                    msgs.append(msg)\n                pbar.desc = f\"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt\"\n\n        pbar.close()\n        if msgs:\n            LOGGER.info('\\n'.join(msgs))\n        if nf == 0:\n            LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')\n        x['hash'] = get_hash(self.label_files + self.im_files)\n        x['results'] = nf, nm, ne, nc, len(self.im_files)\n        x['msgs'] = msgs  # warnings\n        x['version'] = self.cache_version  # cache version\n        try:\n            np.save(path, x)  # save cache for next time\n            path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix\n            LOGGER.info(f'{prefix}New cache created: {path}')\n        except Exception as e:\n            LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')  # not writeable\n        return x\n\n    def __len__(self):\n        return len(self.im_files)\n\n    # def __iter__(self):\n    #     self.count = -1\n    #     print('ran dataset iter')\n    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)\n    #     return self\n\n    def __getitem__(self, index):\n        index = self.indices[index]  # linear, shuffled, or image_weights\n\n        hyp = self.hyp\n        mosaic = self.mosaic and random.random() < hyp['mosaic']\n        if mosaic:\n            # Load mosaic\n            img, labels = self.load_mosaic(index)\n            shapes = None\n\n            # MixUp augmentation\n            if random.random() < hyp['mixup']:\n                img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1)))\n\n        else:\n            # Load image\n            img, (h0, w0), (h, w) = self.load_image(index)\n\n            # Letterbox\n            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape\n            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)\n            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling\n\n            labels = self.labels[index].copy()\n            if labels.size:  # normalized xywh to pixel xyxy format\n                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])\n\n            if self.augment:\n                img, labels = random_perspective(\n                    img,\n                    labels,\n                    degrees=hyp['degrees'],\n                    translate=hyp['translate'],\n                    scale=hyp['scale'],\n                    shear=hyp['shear'],\n                    perspective=hyp['perspective']\n                    )\n\n        nl = len(labels)  # number of 
labels\n        if nl:\n            labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)\n\n        if self.augment:\n            # Albumentations\n            img, labels = self.albumentations(img, labels)\n            nl = len(labels)  # update after albumentations\n\n            # HSV color-space\n            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])\n\n            # Flip up-down\n            if random.random() < hyp['flipud']:\n                img = np.flipud(img)\n                if nl:\n                    labels[:, 2] = 1 - labels[:, 2]\n\n            # Flip left-right\n            if random.random() < hyp['fliplr']:\n                img = np.fliplr(img)\n                if nl:\n                    labels[:, 1] = 1 - labels[:, 1]\n\n            # Cutouts\n            # labels = cutout(img, labels, p=0.5)\n            # nl = len(labels)  # update after cutout\n\n        labels_out = torch.zeros((nl, 6))\n        if nl:\n            labels_out[:, 1:] = torch.from_numpy(labels)\n\n        # Convert\n        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB\n        img = np.ascontiguousarray(img)\n\n        return torch.from_numpy(img), labels_out, self.im_files[index], shapes\n\n    def load_image(self, i):\n        # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw)\n        im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i],\n        if im is None:  # not cached in RAM\n            if fn.exists():  # load npy\n                im = np.load(fn)\n            else:  # read image\n                im = cv2.imread(f)  # BGR\n                assert im is not None, f'Image Not Found {f}'\n            h0, w0 = im.shape[:2]  # orig hw\n            r = self.img_size / max(h0, w0)  # ratio\n            if r != 1:  # if sizes are not equal\n                interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA\n                im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp)\n            return im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized\n        else:\n            return self.ims[i], self.im_hw0[i], self.im_hw[i]  # im, hw_original, hw_resized\n\n    def cache_images_to_disk(self, i):\n        # Saves an image as an *.npy file for faster loading\n        f = self.npy_files[i]\n        if not f.exists():\n            np.save(f.as_posix(), cv2.imread(self.im_files[i]))\n\n    def load_mosaic(self, index):\n        # YOLOv5 4-mosaic loader. 
Loads 1 image + 3 random images into a 4-image mosaic\n        labels4, segments4 = [], []\n        s = self.img_size\n        yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border)  # mosaic center x, y\n        indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices\n        random.shuffle(indices)\n        for i, index in enumerate(indices):\n            # Load image\n            img, _, (h, w) = self.load_image(index)\n\n            # place img in img4\n            if i == 0:  # top left\n                img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles\n                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)\n                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)\n            elif i == 1:  # top right\n                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc\n                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h\n            elif i == 2:  # bottom left\n                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)\n                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)\n            elif i == 3:  # bottom right\n                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)\n                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)\n\n            img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]\n            padw = x1a - x1b\n            padh = y1a - y1b\n\n            # Labels\n            labels, segments = self.labels[index].copy(), self.segments[index].copy()\n            if labels.size:\n                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format\n                segments = [xyn2xy(x, w, h, padw, padh) for x in segments]\n            labels4.append(labels)\n            segments4.extend(segments)\n\n        # Concat/clip labels\n        labels4 = np.concatenate(labels4, 0)\n        for x in (labels4[:, 1:], *segments4):\n            np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()\n        # img4, labels4 = replicate(img4, labels4)  # replicate\n\n        # Augment\n        img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])\n        img4, labels4 = random_perspective(\n            img4,\n            labels4,\n            segments4,\n            degrees=self.hyp['degrees'],\n            translate=self.hyp['translate'],\n            scale=self.hyp['scale'],\n            shear=self.hyp['shear'],\n            perspective=self.hyp['perspective'],\n            border=self.mosaic_border\n            )  # border to remove\n\n        return img4, labels4\n\n    def load_mosaic9(self, index):\n        # YOLOv5 9-mosaic loader. 
Loads 1 image + 8 random images into a 9-image mosaic\n        labels9, segments9 = [], []\n        s = self.img_size\n        indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices\n        random.shuffle(indices)\n        hp, wp = -1, -1  # height, width previous\n        for i, index in enumerate(indices):\n            # Load image\n            img, _, (h, w) = self.load_image(index)\n\n            # place img in img9\n            if i == 0:  # center\n                img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles\n                h0, w0 = h, w\n                c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates\n            elif i == 1:  # top\n                c = s, s - h, s + w, s\n            elif i == 2:  # top right\n                c = s + wp, s - h, s + wp + w, s\n            elif i == 3:  # right\n                c = s + w0, s, s + w0 + w, s + h\n            elif i == 4:  # bottom right\n                c = s + w0, s + hp, s + w0 + w, s + hp + h\n            elif i == 5:  # bottom\n                c = s + w0 - w, s + h0, s + w0, s + h0 + h\n            elif i == 6:  # bottom left\n                c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h\n            elif i == 7:  # left\n                c = s - w, s + h0 - h, s, s + h0\n            elif i == 8:  # top left\n                c = s - w, s + h0 - hp - h, s, s + h0 - hp\n\n            padx, pady = c[:2]\n            x1, y1, x2, y2 = (max(x, 0) for x in c)  # allocate coords\n\n            # Labels\n            labels, segments = self.labels[index].copy(), self.segments[index].copy()\n            if labels.size:\n                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format\n                segments = [xyn2xy(x, w, h, padx, pady) for x in segments]\n            labels9.append(labels)\n            segments9.extend(segments)\n\n            # Image\n            img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]\n            hp, wp = h, w  # height, width previous\n\n        # Offset\n        yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border)  # mosaic center x, y\n        img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]\n\n        # Concat/clip labels\n        labels9 = np.concatenate(labels9, 0)\n        labels9[:, [1, 3]] -= xc\n        labels9[:, [2, 4]] -= yc\n        c = np.array([xc, yc])  # centers\n        segments9 = [x - c for x in segments9]\n\n        for x in (labels9[:, 1:], *segments9):\n            np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()\n        # img9, labels9 = replicate(img9, labels9)  # replicate\n\n        # Augment\n        img9, labels9 = random_perspective(\n            img9,\n            labels9,\n            segments9,\n            degrees=self.hyp['degrees'],\n            translate=self.hyp['translate'],\n            scale=self.hyp['scale'],\n            shear=self.hyp['shear'],\n            perspective=self.hyp['perspective'],\n            border=self.mosaic_border\n            )  # border to remove\n\n        return img9, labels9\n\n    @staticmethod\n    def collate_fn(batch):\n        im, label, path, shapes = zip(*batch)  # transposed\n        for i, lb in enumerate(label):\n            lb[:, 0] = i  # add target image index for build_targets()\n        return torch.stack(im, 0), torch.cat(label, 0), path, shapes\n\n    @staticmethod\n    def collate_fn4(batch):\n        img, label, path, 
shapes = zip(*batch)  # transposed\n        n = len(shapes) // 4\n        im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]\n\n        ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])\n        wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])\n        s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]])  # scale\n        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW\n            i *= 4\n            if random.random() < 0.5:\n                im = F.interpolate(\n                    img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear',\n                    align_corners=False\n                    )[0].type(img[i].type())\n                lb = label[i]\n            else:\n                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)\n                lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s\n            im4.append(im)\n            label4.append(lb)\n\n        for i, lb in enumerate(label4):\n            lb[:, 0] = i  # add target image index for build_targets()\n\n        return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4\n\n\n# Ancillary functions --------------------------------------------------------------------------------------------------\ndef create_folder(path='./new'):\n    # Create folder\n    if os.path.exists(path):\n        shutil.rmtree(path)  # delete output folder\n    os.makedirs(path)  # make new output folder\n\n\ndef flatten_recursive(path=DATASETS_DIR / 'coco128'):\n    # Flatten a recursive directory by bringing all files to top level\n    new_path = Path(str(path) + '_flat')\n    create_folder(new_path)\n    for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):\n        shutil.copyfile(file, new_path / Path(file).name)\n\n\ndef extract_boxes(path=DATASETS_DIR / 'coco128'):  # from utils.dataloaders import *; extract_boxes()\n    # Convert detection dataset into classification dataset, with one directory per class\n    path = Path(path)  # images dir\n    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing\n    files = list(path.rglob('*.*'))\n    n = len(files)  # number of files\n    for im_file in tqdm(files, total=n):\n        if im_file.suffix[1:] in IMG_FORMATS:\n            # image\n            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB\n            h, w = im.shape[:2]\n\n            # labels\n            lb_file = Path(img2label_paths([str(im_file)])[0])\n            if Path(lb_file).exists():\n                with open(lb_file) as f:\n                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels\n\n                for j, x in enumerate(lb):\n                    c = int(x[0])  # class\n                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename\n                    if not f.parent.is_dir():\n                        f.parent.mkdir(parents=True)\n\n                    b = x[1:] * [w, h, w, h]  # box\n                    # b[2:] = b[2:].max()  # rectangle to square\n                    b[2:] = b[2:] * 1.2 + 3  # pad\n                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)\n\n                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image\n                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)\n                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'\n\n\ndef 
autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):\n    \"\"\" Autosplit a dataset into train/eval/test splits and save path/autosplit_*.txt files\n    Usage: from utils.dataloaders import *; autosplit()\n    Arguments\n        path:            Path to images directory\n        weights:         Train, eval, test weights (list, tuple)\n        annotated_only:  Only use images with an annotated txt file\n    \"\"\"\n    path = Path(path)  # images dir\n    files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS)  # image files only\n    n = len(files)  # number of files\n    random.seed(0)  # for reproducibility\n    indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split\n\n    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files\n    [(path.parent / x).unlink(missing_ok=True) for x in txt]  # remove existing\n\n    print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)\n    for i, img in tqdm(zip(indices, files), total=n):\n        if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # check label\n            with open(path.parent / txt[i], 'a') as f:\n                f.write('./' + img.relative_to(path.parent).as_posix() + '\\n')  # add image to txt file\n\n\ndef verify_image_label(args):\n    # Verify one image-label pair\n    im_file, lb_file, prefix = args\n    nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', []  # number (missing, found, empty, corrupt), message, segments\n    try:\n        # verify images\n        im = Image.open(im_file)\n        im.verify()  # PIL verify\n        shape = exif_size(im)  # image size\n        assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'\n        assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'\n        if im.format.lower() in ('jpg', 'jpeg'):\n            with open(im_file, 'rb') as f:\n                f.seek(-2, 2)\n                if f.read() != b'\\xff\\xd9':  # corrupt JPEG\n                    ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)\n                    msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved'\n\n        # verify labels\n        if os.path.isfile(lb_file):\n            nf = 1  # label found\n            with open(lb_file) as f:\n                lb = [x.split() for x in f.read().strip().splitlines() if len(x)]\n                if any(len(x) > 6 for x in lb):  # is segment\n                    classes = np.array([x[0] for x in lb], dtype=np.float32)\n                    segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb]  # (cls, xy1...)\n                    lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)\n                lb = np.array(lb, dtype=np.float32)\n            nl = len(lb)\n            if nl:\n                assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected'\n                assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}'\n                assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}'\n                _, i = np.unique(lb, axis=0, return_index=True)\n                if len(i) < nl:  # duplicate row check\n                    lb = lb[i]  # remove duplicates\n                    if segments:\n                        segments = 
[segments[x] for x in i]\n                    msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed'\n            else:\n                ne = 1  # label empty\n                lb = np.zeros((0, 5), dtype=np.float32)\n        else:\n            nm = 1  # label missing\n            lb = np.zeros((0, 5), dtype=np.float32)\n        return im_file, lb, shape, segments, nm, nf, ne, nc, msg\n    except Exception as e:\n        nc = 1\n        msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}'\n        return [None, None, None, None, nm, nf, ne, nc, msg]\n\n\ndef dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):\n    \"\"\" Return dataset statistics dictionary with images and instances counts per split per class\n    To run in parent directory: export PYTHONPATH=\"$PWD/yolov5\"\n    Usage1: from utils.dataloaders import *; dataset_stats('coco128.yaml', autodownload=True)\n    Usage2: from utils.dataloaders import *; dataset_stats('path/to/coco128_with_yaml.zip')\n    Arguments\n        path:           Path to data.yaml or data.zip (with data.yaml inside data.zip)\n        autodownload:   Attempt to download dataset if not found locally\n        verbose:        Print stats dictionary\n    \"\"\"\n\n    def _round_labels(labels):\n        # Update labels to integer class and 4 decimal place floats\n        return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]\n\n    def _find_yaml(dir):\n        # Return data.yaml file\n        files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml'))  # try root level first and then recursive\n        assert files, f'No *.yaml file found in {dir}'\n        if len(files) > 1:\n            files = [f for f in files if f.stem == dir.stem]  # prefer *.yaml files that match dir name\n            assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed'\n        assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}'\n        return files[0]\n\n    def _unzip(path):\n        # Unzip data.zip\n        if str(path).endswith('.zip'):  # path is data.zip\n            assert Path(path).is_file(), f'Error unzipping {path}, file not found'\n            ZipFile(path).extractall(path=path.parent)  # unzip\n            dir = path.with_suffix('')  # dataset directory == zip name\n            assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. 
path/to/abc.zip MUST unzip to path/to/abc/'\n            return True, str(dir), _find_yaml(dir)  # zipped, data_dir, yaml_path\n        else:  # path is data.yaml\n            return False, None, path\n\n    def _hub_ops(f, max_dim=1920):\n        # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing\n        f_new = im_dir / Path(f).name  # dataset-hub image filename\n        try:  # use PIL\n            im = Image.open(f)\n            r = max_dim / max(im.height, im.width)  # ratio\n            if r < 1.0:  # image too large\n                im = im.resize((int(im.width * r), int(im.height * r)))\n            im.save(f_new, 'JPEG', quality=75, optimize=True)  # save\n        except Exception as e:  # use OpenCV\n            print(f'WARNING: HUB ops PIL failure {f}: {e}')\n            im = cv2.imread(f)\n            im_height, im_width = im.shape[:2]\n            r = max_dim / max(im_height, im_width)  # ratio\n            if r < 1.0:  # image too large\n                im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA)\n            cv2.imwrite(str(f_new), im)\n\n    zipped, data_dir, yaml_path = _unzip(Path(path))\n    try:\n        with open(check_yaml(yaml_path), errors='ignore') as f:\n            data = yaml.safe_load(f)  # data dict\n            if zipped:\n                data['path'] = data_dir  # TODO: should this be dir.resolve()?`\n    except Exception:\n        raise Exception(\"error/HUB/dataset_stats/yaml_load\")\n\n    check_dataset(data, autodownload)  # download dataset if missing\n    hub_dir = Path(data['path'] + ('-hub' if hub else ''))\n    stats = {'nc': data['nc'], 'names': data['names']}  # statistics dictionary\n    for split in 'train', 'eval', 'test':\n        if data.get(split) is None:\n            stats[split] = None  # i.e. 
no test set\n            continue\n        x = []\n        dataset = LoadImagesAndLabels(data[split])  # load dataset\n        for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):\n            x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))\n        x = np.array(x)  # shape(128x80)\n        stats[split] = {\n            'instance_stats': {\n                'total': int(x.sum()),\n                'per_class': x.sum(0).tolist()},\n            'image_stats': {\n                'total': dataset.n,\n                'unlabelled': int(np.all(x == 0, 1).sum()),\n                'per_class': (x > 0).sum(0).tolist()},\n            'labels': [{\n                str(Path(k).name): _round_labels(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]}\n\n        if hub:\n            im_dir = hub_dir / 'images'\n            im_dir.mkdir(parents=True, exist_ok=True)\n            for _ in tqdm(ThreadPool(NUM_THREADS).imap(_hub_ops, dataset.im_files), total=dataset.n, desc='HUB Ops'):\n                pass\n\n    # Profile\n    stats_path = hub_dir / 'stats.json'\n    if profile:\n        for _ in range(1):\n            file = stats_path.with_suffix('.npy')\n            t1 = time.time()\n            np.save(file, stats)\n            t2 = time.time()\n            x = np.load(file, allow_pickle=True)\n            print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')\n\n            file = stats_path.with_suffix('.json')\n            t1 = time.time()\n            with open(file, 'w') as f:\n                json.dump(stats, f)  # save stats *.json\n            t2 = time.time()\n            with open(file) as f:\n                x = json.load(f)  # load hyps dict\n            print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')\n\n    # Save, print and return\n    if hub:\n        print(f'Saving {stats_path.resolve()}...')\n        with open(stats_path, 'w') as f:\n            json.dump(stats, f)  # save stats.json\n    if verbose:\n        print(json.dumps(stats, indent=2, sort_keys=False))\n    return stats\n"
  },
  {
    "path": "module/detect/utils/docker/Dockerfile",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n# Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5\n# Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference\n\n# Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch\nFROM nvcr.io/nvidia/pytorch:22.06-py3\nRUN rm -rf /opt/pytorch  # remove 1.2GB dir\n\n# Downloads to user config dir\nADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/\n\n# Install linux packages\nRUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx\n\n# Install pip packages\nCOPY requirements.txt .\nRUN python -m pip install --upgrade pip wheel\nRUN pip uninstall -y Pillow torchtext  # torch torchvision\nRUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook Pillow>=9.1.0 \\\n    'opencv-python<4.6.0.66' \\\n    --extra-index-url https://download.pytorch.org/whl/cu113\n\n# Create working directory\nRUN mkdir -p /usr/src/app\nWORKDIR /usr/src/app\n\n# Copy contents\nCOPY . /usr/src/app\nRUN git clone https://github.com/ultralytics/yolov5 /usr/src/yolov5\n\n# Set environment variables\nENV OMP_NUM_THREADS=8\n\n\n# Usage Examples -------------------------------------------------------------------------------------------------------\n\n# Build and Push\n# t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t\n\n# Pull and Run\n# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t\n\n# Pull and Run with local directory access\n# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v \"$(pwd)\"/datasets:/usr/src/datasets $t\n\n# Kill all\n# sudo docker kill $(sudo docker ps -q)\n\n# Kill all image-based\n# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)\n\n# Bash into running container\n# sudo docker exec -it 5a9b5863d93d bash\n\n# Bash into stopped container\n# id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash\n\n# Clean up\n# docker system prune -a --volumes\n\n# Update Ubuntu drivers\n# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/\n\n# DDP test\n# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 trainfd.py --epochs 3\n\n# GCP VM from Image\n# docker.io/ultralytics/yolov5:latest\n"
  },
  {
    "path": "module/detect/utils/docker/Dockerfile-arm64",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n# Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5\n# Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi\n\n# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu\nFROM arm64v8/ubuntu:20.04\n\n# Downloads to user config dir\nADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/\n\n# Install linux packages\nRUN apt update\nRUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata\nRUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc \\\n    libgl1-mesa-glx libglib2.0-0 libpython3.8-dev\n# RUN alias python=python3\n\n# Install pip packages\nCOPY requirements.txt .\nRUN python3 -m pip install --upgrade pip wheel\nRUN pip install --no-cache -r requirements.txt gsutil notebook \\\n    tensorflow-aarch64\n    # tensorflowjs \\\n    # onnx onnx-simplifier onnxruntime \\\n    # coremltools openvino-dev \\\n\n# Create working directory\nRUN mkdir -p /usr/src/app\nWORKDIR /usr/src/app\n\n# Copy contents\nCOPY . /usr/src/app\nRUN git clone https://github.com/ultralytics/yolov5 /usr/src/yolov5\n\n\n# Usage Examples -------------------------------------------------------------------------------------------------------\n\n# Build and Push\n# t=ultralytics/yolov5:latest-M1 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t\n\n# Pull and Run\n# t=ultralytics/yolov5:latest-M1 && sudo docker pull $t && sudo docker run -it --ipc=host -v \"$(pwd)\"/datasets:/usr/src/datasets $t\n"
  },
  {
    "path": "module/detect/utils/docker/Dockerfile-cpu",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n# Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5\n# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments\n\n# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu\nFROM ubuntu:20.04\n\n# Downloads to user config dir\nADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/\n\n# Install linux packages\nRUN apt update\nRUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata\nRUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3.8-dev\n# RUN alias python=python3\n\n# Install pip packages\nCOPY requirements.txt .\nRUN python3 -m pip install --upgrade pip wheel\nRUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \\\n    coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu tensorflowjs \\\n    --extra-index-url https://download.pytorch.org/whl/cpu\n\n# Create working directory\nRUN mkdir -p /usr/src/app\nWORKDIR /usr/src/app\n\n# Copy contents\nCOPY . /usr/src/app\nRUN git clone https://github.com/ultralytics/yolov5 /usr/src/yolov5\n\n\n# Usage Examples -------------------------------------------------------------------------------------------------------\n\n# Build and Push\n# t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . && sudo docker push $t\n\n# Pull and Run\n# t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v \"$(pwd)\"/datasets:/usr/src/datasets $t\n"
  },
  {
    "path": "module/detect/utils/downloads.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nDownload utils\n\"\"\"\n\nimport logging\nimport os\nimport platform\nimport subprocess\nimport time\nimport urllib\nfrom pathlib import Path\nfrom zipfile import ZipFile\n\nimport requests\nimport torch\n\n\ndef is_url(url):\n    # Check if online file exists\n    try:\n        r = urllib.request.urlopen(url)  # response\n        return r.getcode() == 200\n    except urllib.request.HTTPError:\n        return False\n\n\ndef gsutil_getsize(url=''):\n    # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du\n    s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')\n    return eval(s.split(' ')[0]) if len(s) else 0  # bytes\n\n\ndef safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):\n    # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes\n    from utils.general import LOGGER\n\n    file = Path(file)\n    assert_msg = f\"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}\"\n    try:  # url1\n        LOGGER.info(f'Downloading {url} to {file}...')\n        torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO)\n        assert file.exists() and file.stat().st_size > min_bytes, assert_msg  # check\n    except Exception as e:  # url2\n        file.unlink(missing_ok=True)  # remove partial downloads\n        LOGGER.info(f'ERROR: {e}\\nRe-attempting {url2 or url} to {file}...')\n        os.system(f\"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -\")  # curl download, retry and resume on fail\n    finally:\n        if not file.exists() or file.stat().st_size < min_bytes:  # check\n            file.unlink(missing_ok=True)  # remove partial downloads\n            LOGGER.info(f\"ERROR: {assert_msg}\\n{error_msg}\")\n        LOGGER.info('')\n\n\ndef attempt_download(file, repo='ultralytics/yolov5', release='v6.1'):\n    # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.1', etc.\n    from utils.general import LOGGER\n\n    def github_assets(repository, version='latest'):\n        # Return GitHub repo tag (i.e. 'v6.1') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...])\n        if version != 'latest':\n            version = f'tags/{version}'  # i.e. 
tags/v6.1\n        response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json()  # github api\n        return response['tag_name'], [x['name'] for x in response['assets']]  # tag, assets\n\n    file = Path(str(file).strip().replace(\"'\", ''))\n    if not file.exists():\n        # URL specified\n        name = Path(urllib.parse.unquote(str(file))).name  # decode '%2F' to '/' etc.\n        if str(file).startswith(('http:/', 'https:/')):  # download\n            url = str(file).replace(':/', '://')  # Pathlib turns :// -> :/\n            file = name.split('?')[0]  # parse authentication https://url.com/file.txt?auth...\n            if Path(file).is_file():\n                LOGGER.info(f'Found {url} locally at {file}')  # file already exists\n            else:\n                safe_download(file=file, url=url, min_bytes=1E5)\n            return file\n\n        # GitHub assets\n        assets = [\n            'yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov5n6.pt', 'yolov5s6.pt',\n            'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt']\n        try:\n            tag, assets = github_assets(repo, release)\n        except Exception:\n            try:\n                tag, assets = github_assets(repo)  # latest release\n            except Exception:\n                try:\n                    tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]\n                except Exception:\n                    tag = release\n\n        file.parent.mkdir(parents=True, exist_ok=True)  # make parent dir (if required)\n        if name in assets:\n            url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl'  # backup gdrive mirror\n            safe_download(\n                file,\n                url=f'https://github.com/{repo}/releases/download/{tag}/{name}',\n                url2=f'https://storage.googleapis.com/{repo}/{tag}/{name}',  # backup url (optional)\n                min_bytes=1E5,\n                error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}')\n\n    return str(file)\n\n\ndef gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'):\n    # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download()\n    t = time.time()\n    file = Path(file)\n    cookie = Path('cookie')  # gdrive cookie\n    print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='')\n    file.unlink(missing_ok=True)  # remove existing file\n    cookie.unlink(missing_ok=True)  # remove existing cookie\n\n    # Attempt file download\n    out = \"NUL\" if platform.system() == \"Windows\" else \"/dev/null\"\n    os.system(f'curl -c ./cookie -s -L \"drive.google.com/uc?export=download&id={id}\" > {out}')\n    if os.path.exists('cookie'):  # large file\n        s = f'curl -Lb ./cookie \"drive.google.com/uc?export=download&confirm={get_token()}&id={id}\" -o {file}'\n    else:  # small file\n        s = f'curl -s -L -o {file} \"drive.google.com/uc?export=download&id={id}\"'\n    r = os.system(s)  # execute, capture return\n    cookie.unlink(missing_ok=True)  # remove existing cookie\n\n    # Error check\n    if r != 0:\n        file.unlink(missing_ok=True)  # remove partial\n        print('Download error ')  # raise Exception('Download error')\n        return r\n\n    # Unzip if archive\n    if file.suffix == '.zip':\n        print('unzipping... 
', end='')\n        ZipFile(file).extractall(path=file.parent)  # unzip\n        file.unlink()  # remove zip\n\n    print(f'Done ({time.time() - t:.1f}s)')\n    return r\n\n\ndef get_token(cookie=\"./cookie\"):\n    with open(cookie) as f:\n        for line in f:\n            if \"download\" in line:\n                return line.split()[-1]\n    return \"\"\n\n\n# Google utils: https://cloud.google.com/storage/docs/reference/libraries ----------------------------------------------\n#\n#\n# def upload_blob(bucket_name, source_file_name, destination_blob_name):\n#     # Uploads a file to a bucket\n#     # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python\n#\n#     storage_client = storage.Client()\n#     bucket = storage_client.get_bucket(bucket_name)\n#     blob = bucket.blob(destination_blob_name)\n#\n#     blob.upload_from_filename(source_file_name)\n#\n#     print('File {} uploaded to {}.'.format(\n#         source_file_name,\n#         destination_blob_name))\n#\n#\n# def download_blob(bucket_name, source_blob_name, destination_file_name):\n#     # Uploads a blob from a bucket\n#     storage_client = storage.Client()\n#     bucket = storage_client.get_bucket(bucket_name)\n#     blob = bucket.blob(source_blob_name)\n#\n#     blob.download_to_filename(destination_file_name)\n#\n#     print('Blob {} downloaded to {}.'.format(\n#         source_blob_name,\n#         destination_file_name))\n"
  },
  {
    "path": "module/detect/utils/flask_rest_api/README.md",
    "content": "# Flask REST API\n\n[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are\ncommonly used to expose Machine Learning (ML)  models to other services. This folder contains an example REST API\ncreated using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).\n\n## Requirements\n\n[Flask](https://palletsprojects.com/p/flask/) is required. Install with:\n\n```shell\n$ pip install Flask\n```\n\n## Run\n\nAfter Flask installation run:\n\n```shell\n$ python3 restapi.py --port 5000\n```\n\nThen use [curl](https://curl.se/) to perform a request:\n\n```shell\n$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'\n```\n\nThe model inference results are returned as a JSON response:\n\n```json\n[\n  {\n    \"class\": 0,\n    \"confidence\": 0.8900438547,\n    \"height\": 0.9318675399,\n    \"name\": \"person\",\n    \"width\": 0.3264600933,\n    \"xcenter\": 0.7438579798,\n    \"ycenter\": 0.5207948685\n  },\n  {\n    \"class\": 0,\n    \"confidence\": 0.8440024257,\n    \"height\": 0.7155083418,\n    \"name\": \"person\",\n    \"width\": 0.6546785235,\n    \"xcenter\": 0.427829951,\n    \"ycenter\": 0.6334488392\n  },\n  {\n    \"class\": 27,\n    \"confidence\": 0.3771208823,\n    \"height\": 0.3902671337,\n    \"name\": \"tie\",\n    \"width\": 0.0696444362,\n    \"xcenter\": 0.3675483763,\n    \"ycenter\": 0.7991207838\n  },\n  {\n    \"class\": 27,\n    \"confidence\": 0.3527112305,\n    \"height\": 0.1540903747,\n    \"name\": \"tie\",\n    \"width\": 0.0336618312,\n    \"xcenter\": 0.7814827561,\n    \"ycenter\": 0.5065554976\n  }\n]\n```\n\nAn example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given\nin `example_request.py`\n"
  },
  {
    "path": "module/detect/utils/flask_rest_api/example_request.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nPerform test request\n\"\"\"\n\nimport pprint\n\nimport requests\n\nDETECTION_URL = \"http://localhost:5000/v1/object-detection/yolov5s\"\nIMAGE = \"zidane.jpg\"\n\n# Read image\nwith open(IMAGE, \"rb\") as f:\n    image_data = f.read()\n\nresponse = requests.post(DETECTION_URL, files={\"image\": image_data}).json()\n\npprint.pprint(response)\n"
  },
  {
    "path": "module/detect/utils/flask_rest_api/restapi.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nRun a Flask REST API exposing a YOLOv5s model\n\"\"\"\n\nimport argparse\nimport io\n\nimport torch\nfrom flask import Flask, request\nfrom PIL import Image\n\napp = Flask(__name__)\n\nDETECTION_URL = \"/v1/object-detection/yolov5s\"\n\n\n@app.route(DETECTION_URL, methods=[\"POST\"])\ndef predict():\n    if request.method != \"POST\":\n        return\n\n    if request.files.get(\"image\"):\n        # Method 1\n        # with request.files[\"image\"] as f:\n        #     im = Image.open(io.BytesIO(f.read()))\n\n        # Method 2\n        im_file = request.files[\"image\"]\n        im_bytes = im_file.read()\n        im = Image.open(io.BytesIO(im_bytes))\n\n        results = model(im, size=640)  # reduce size=320 for faster inference\n        return results.pandas().xyxy[0].to_json(orient=\"records\")\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Flask API exposing YOLOv5 model\")\n    parser.add_argument(\"--port\", default=5000, type=int, help=\"port number\")\n    opt = parser.parse_args()\n\n    # Fix known issue urllib.error.HTTPError 403: rate limit exceeded https://github.com/ultralytics/yolov5/pull/7210\n    torch.hub._validate_not_a_forked_repo = lambda a, b, c: True\n\n    model = torch.hub.load(\"ultralytics/yolov5\", \"yolov5s\", force_reload=True)  # force_reload to recache\n    app.run(host=\"0.0.0.0\", port=opt.port)  # debug=True causes Restarting with stat\n"
  },
  {
    "path": "module/detect/utils/general.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nGeneral utils\n\"\"\"\n\nimport contextlib\nimport glob\nimport inspect\nimport logging\nimport os\nimport platform\nimport random\nimport re\nimport shutil\nimport signal\nimport threading\nimport time\nimport urllib\nfrom datetime import datetime\nfrom itertools import repeat\nfrom multiprocessing.pool import ThreadPool\nfrom pathlib import Path\nfrom subprocess import check_output\nfrom typing import Optional\nfrom zipfile import ZipFile\n\nimport cv2\nimport math\nimport numpy as np\nimport pandas as pd\nimport pkg_resources as pkg\nimport torch\nimport torchvision\nimport yaml\nfrom utils.downloads import gsutil_getsize\nfrom utils.metrics import box_iou, fitness\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1]  # YOLOv5 root directory\nRANK = int(os.getenv('RANK', -1))\n\n# Settings\nDATASETS_DIR = ROOT.parent / 'datasets'  # YOLOv5 datasets directory\nNUM_THREADS = min(8, max(1, os.cpu_count() - 1))  # number of YOLOv5 multiprocessing threads\nAUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true'  # global auto-install mode\nVERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true'  # global verbose mode\nFONT = 'Arial.ttf'  # https://ultralytics.com/assets/Arial.ttf\n\ntorch.set_printoptions(linewidth=320, precision=5, profile='long')\nnp.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5\npd.options.display.max_columns = 10\ncv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)\nos.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS)  # NumExpr max threads\nos.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS)  # OpenMP (PyTorch and SciPy)\n\n\ndef is_kaggle():\n    # Is environment a Kaggle Notebook?\n    try:\n        assert os.environ.get('PWD') == '/kaggle/working'\n        assert os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com'\n        return True\n    except AssertionError:\n        return False\n\n\ndef is_writeable(dir, test=False):\n    # Return True if directory has write permissions, test opening a file with write permissions if test=True\n    if not test:\n        return os.access(dir, os.R_OK)  # possible issues on Windows\n    file = Path(dir) / 'tmp.txt'\n    try:\n        with open(file, 'w'):  # open file with write permissions\n            pass\n        file.unlink()  # remove file\n        return True\n    except OSError:\n        return False\n\n\ndef set_logging(name=None, verbose=VERBOSE):\n    # Sets level and returns logger\n    if is_kaggle():\n        for h in logging.root.handlers:\n            logging.root.removeHandler(h)  # remove all handlers associated with the root logger object\n    rank = int(os.getenv('RANK', -1))  # rank in world for Multi-GPU trainings\n    level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR\n    log = logging.getLogger(name)\n    log.setLevel(level)\n    handler = logging.StreamHandler()\n    handler.setFormatter(logging.Formatter(\"%(message)s\"))\n    handler.setLevel(level)\n    log.addHandler(handler)\n\n\n# set_logging()  # run before defining LOGGER\nLOGGER = logging.getLogger()  # define globally (used in train.py, eval.py, detect.py, etc.)\n\n\ndef user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):\n    # Return path of user configuration directory. Prefer environment variable if exists. 
Make dir if required.\n    env = os.getenv(env_var)\n    if env:\n        path = Path(env)  # use environment variable\n    else:\n        cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'}  # 3 OS dirs\n        path = Path.home() / cfg.get(platform.system(), '')  # OS-specific config dir\n        path = (path if is_writeable(path) else Path('/tmp')) / dir  # GCP and AWS lambda fix, only /tmp is writeable\n    path.mkdir(exist_ok=True)  # make if required\n    return path\n\n\nCONFIG_DIR = user_config_dir()  # Ultralytics settings dir\n\n\nclass Profile(contextlib.ContextDecorator):\n    # Usage: @Profile() decorator or 'with Profile():' context manager\n    def __enter__(self):\n        self.start = time.time()\n\n    def __exit__(self, type, value, traceback):\n        print(f'Profile results: {time.time() - self.start:.5f}s')\n\n\nclass Timeout(contextlib.ContextDecorator):\n    # Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager\n    def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):\n        self.seconds = int(seconds)\n        self.timeout_message = timeout_msg\n        self.suppress = bool(suppress_timeout_errors)\n\n    def _timeout_handler(self, signum, frame):\n        raise TimeoutError(self.timeout_message)\n\n    def __enter__(self):\n        if platform.system() != 'Windows':  # not supported on Windows\n            signal.signal(signal.SIGALRM, self._timeout_handler)  # Set handler for SIGALRM\n            signal.alarm(self.seconds)  # start countdown for SIGALRM to be raised\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        if platform.system() != 'Windows':\n            signal.alarm(0)  # Cancel SIGALRM if it's scheduled\n            if self.suppress and exc_type is TimeoutError:  # Suppress TimeoutError\n                return True\n\n\nclass WorkingDirectory(contextlib.ContextDecorator):\n    # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager\n    def __init__(self, new_dir):\n        self.dir = new_dir  # new dir\n        self.cwd = Path.cwd().resolve()  # current dir\n\n    def __enter__(self):\n        os.chdir(self.dir)\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        os.chdir(self.cwd)\n\n\ndef try_except(func):\n    # try-except function. Usage: @try_except decorator\n    def handler(*args, **kwargs):\n        try:\n            func(*args, **kwargs)\n        except Exception as e:\n            print(e)\n\n    return handler\n\n\ndef threaded(func):\n    # Multi-threads a target function and returns thread. 
Usage: @threaded decorator\n    def wrapper(*args, **kwargs):\n        thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)\n        thread.start()\n        return thread\n\n    return wrapper\n\n\ndef methods(instance):\n    # Get class/instance methods\n    return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith(\"__\")]\n\n\ndef print_args(args: Optional[dict] = None, show_file=True, show_fcn=False):\n    # Print function arguments (optional args dict)\n    x = inspect.currentframe().f_back  # previous frame\n    file, _, fcn, _, _ = inspect.getframeinfo(x)\n    if args is None:  # get args automatically\n        args, _, _, frm = inspect.getargvalues(x)\n        args = {k: v for k, v in frm.items() if k in args}\n    s = (f'{Path(file).stem}: ' if show_file else '') + (f'{fcn}: ' if show_fcn else '')\n    LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items()))\n\n\ndef init_seeds(seed=0, deterministic=False):\n    # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html\n    # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible\n    import torch.backends.cudnn as cudnn\n\n    if deterministic and check_version(torch.__version__, '1.12.0'):  # https://github.com/ultralytics/yolov5/pull/8213\n        torch.use_deterministic_algorithms(True)\n        os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'\n        os.environ['PYTHONHASHSEED'] = str(seed)\n\n    random.seed(seed)\n    np.random.seed(seed)\n    torch.manual_seed(seed)\n    cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False)\n    torch.cuda.manual_seed(seed)\n    torch.cuda.manual_seed_all(seed)  # for Multi-GPU, exception safe\n\n\ndef intersect_dicts(da, db, exclude=()):\n    # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values\n    return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}\n\n\ndef get_latest_run(search_dir='.'):\n    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)\n    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)\n    return max(last_list, key=os.path.getctime) if last_list else ''\n\n\ndef is_docker():\n    # Is environment a Docker container?\n    return Path('/workspace').exists()  # or Path('/.dockerenv').exists()\n\n\ndef is_colab():\n    # Is environment a Google Colab instance?\n    try:\n        import google.colab\n        return True\n    except ImportError:\n        return False\n\n\ndef is_pip():\n    # Is file in a pip package?\n    return 'site-packages' in Path(__file__).resolve().parts\n\n\ndef is_ascii(s=''):\n    # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)\n    s = str(s)  # convert list, tuple, None, etc. 
to str\n    return len(s.encode().decode('ascii', 'ignore')) == len(s)\n\n\ndef is_chinese(s='人工智能'):\n    # Is string composed of any Chinese characters?\n    return bool(re.search('[\\u4e00-\\u9fff]', str(s)))\n\n\ndef emojis(str=''):\n    # Return platform-dependent emoji-safe version of string\n    return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str\n\n\ndef file_age(path=__file__):\n    # Return days since last file update\n    dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime))  # delta\n    return dt.days  # + dt.seconds / 86400  # fractional days\n\n\ndef file_date(path=__file__):\n    # Return human-readable file modification date, i.e. '2021-3-26'\n    t = datetime.fromtimestamp(Path(path).stat().st_mtime)\n    return f'{t.year}-{t.month}-{t.day}'\n\n\ndef file_size(path):\n    # Return file/dir size (MB)\n    mb = 1 << 20  # bytes to MiB (1024 ** 2)\n    path = Path(path)\n    if path.is_file():\n        return path.stat().st_size / mb\n    elif path.is_dir():\n        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb\n    else:\n        return 0.0\n\n\ndef check_online():\n    # Check internet connectivity\n    import socket\n    try:\n        socket.create_connection((\"1.1.1.1\", 443), 5)  # check host accessibility\n        return True\n    except OSError:\n        return False\n\n\ndef git_describe(path=ROOT):  # path must be a directory\n    # Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe\n    try:\n        assert (Path(path) / '.git').is_dir()\n        return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1]\n    except Exception:\n        return ''\n\n\n@try_except\n@WorkingDirectory(ROOT)\ndef check_git_status():\n    # Recommend 'git pull' if code is out of date\n    msg = ', for updates see https://github.com/ultralytics/yolov5'\n    s = colorstr('github: ')  # string\n    assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg\n    assert not is_docker(), s + 'skipping check (Docker image)' + msg\n    assert check_online(), s + 'skipping check (offline)' + msg\n\n    cmd = 'git fetch && git config --get remote.origin.url'\n    url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git')  # git fetch\n    branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip()  # checked out\n    n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True))  # commits behind\n    if n > 0:\n        s += f\"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update.\"\n    else:\n        s += f'up to date with {url} ✅'\n    LOGGER.info(emojis(s))  # emoji-safe\n\n\ndef check_python(minimum='3.7.0'):\n    # Check current python version vs. required python version\n    check_version(platform.python_version(), minimum, name='Python ', hard=True)\n\n\ndef check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\n    # Check version vs. 
required version\n    current, minimum = (pkg.parse_version(x) for x in (current, minimum))\n    result = (current == minimum) if pinned else (current >= minimum)  # bool\n    s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed'  # string\n    if hard:\n        assert result, s  # assert min requirements met\n    if verbose and not result:\n        LOGGER.warning(s)\n    return result\n\n\n@try_except\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=()):\n    # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n    prefix = colorstr('red', 'bold', 'requirements:')\n    check_python()  # check python version\n    if isinstance(requirements, (str, Path)):  # requirements.txt file\n        file = Path(requirements)\n        assert file.exists(), f\"{prefix} {file.resolve()} not found, check failed.\"\n        with file.open() as f:\n            requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]\n    else:  # list or tuple of packages\n        requirements = [x for x in requirements if x not in exclude]\n\n    n = 0  # number of packages updates\n    for i, r in enumerate(requirements):\n        try:\n            pkg.require(r)\n        except Exception:  # DistributionNotFound or VersionConflict if requirements not met\n            s = f\"{prefix} {r} not found and is required by YOLOv5\"\n            if install and AUTOINSTALL:  # check environment variable\n                LOGGER.info(f\"{s}, attempting auto-update...\")\n                try:\n                    assert check_online(), f\"'pip install {r}' skipped (offline)\"\n                    LOGGER.info(check_output(f'pip install \"{r}\" {cmds[i] if cmds else \"\"}', shell=True).decode())\n                    n += 1\n                except Exception as e:\n                    LOGGER.warning(f'{prefix} {e}')\n            else:\n                LOGGER.info(f'{s}. Please install and rerun your command.')\n\n    if n:  # if packages updated\n        source = file.resolve() if 'file' in locals() else requirements\n        s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n            f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n        LOGGER.info(emojis(s))\n\n\ndef check_img_size(imgsz, s=32, floor=0):\n    # Verify image size is a multiple of stride s in each dimension\n    if isinstance(imgsz, int):  # integer i.e. img_size=640\n        new_size = max(make_divisible(imgsz, int(s)), floor)\n    else:  # list i.e. 
img_size=[640, 480]\n        imgsz = list(imgsz)  # convert to list if tuple\n        new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n    if new_size != imgsz:\n        LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')\n    return new_size\n\n\ndef check_imshow():\n    # Check if environment supports image displays\n    try:\n        assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'\n        assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'\n        cv2.imshow('test', np.zeros((1, 1, 3)))\n        cv2.waitKey(1)\n        cv2.destroyAllWindows()\n        cv2.waitKey(1)\n        return True\n    except Exception as e:\n        LOGGER.warning(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\\n{e}')\n        return False\n\n\ndef check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):\n    # Check file(s) for acceptable suffix\n    if file and suffix:\n        if isinstance(suffix, str):\n            suffix = [suffix]\n        for f in file if isinstance(file, (list, tuple)) else [file]:\n            s = Path(f).suffix.lower()  # file suffix\n            if len(s):\n                assert s in suffix, f\"{msg}{f} acceptable suffix is {suffix}\"\n\n\ndef check_yaml(file, suffix=('.yaml', '.yml')):\n    # Search/download YAML file (if necessary) and return path, checking suffix\n    return check_file(file, suffix)\n\n\ndef check_file(file, suffix=''):\n    # Search/download file (if necessary) and return path\n    check_suffix(file, suffix)  # optional\n    file = str(file)  # convert to str()\n    if Path(file).is_file() or not file:  # exists\n        return file\n    elif file.startswith(('http:/', 'https:/')):  # download\n        url = file  # warning: Pathlib turns :// -> :/\n        file = Path(urllib.parse.unquote(file).split('?')[0]).name  # '%2F' to '/', split https://url.com/file.txt?auth\n        if Path(file).is_file():\n            LOGGER.info(f'Found {url} locally at {file}')  # file already exists\n        else:\n            LOGGER.info(f'Downloading {url} to {file}...')\n            torch.hub.download_url_to_file(url, file)\n            assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}'  # check\n        return file\n    else:  # search\n        files = []\n        for d in 'data', 'models', 'utils':  # search directories\n            files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True))  # find file\n        assert len(files), f'File not found: {file}'  # assert file was found\n        assert len(files) == 1, f\"Multiple files match '{file}', specify exact path: {files}\"  # assert unique\n        return files[0]  # return file\n\n\ndef check_font(font=FONT, progress=False):\n    # Download font to CONFIG_DIR if necessary\n    font = Path(font)\n    file = CONFIG_DIR / font.name\n    if not font.exists() and not file.exists():\n        url = \"https://ultralytics.com/assets/\" + font.name\n        LOGGER.info(f'Downloading {url} to {file}...')\n        torch.hub.download_url_to_file(url, str(file), progress=progress)\n\n\ndef check_dataset(data, autodownload=True):\n    # Download, check and/or unzip dataset if not found locally\n\n    # Download (optional)\n    extract_dir = ''\n    if isinstance(data, (str, Path)) and str(data).endswith('.zip'):  # i.e. 
gs://bucket/dir/coco128.zip\n        download(data, dir=DATASETS_DIR, unzip=True, delete=False, curl=False, threads=1)\n        data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml'))\n        extract_dir, autodownload = data.parent, False\n\n    # Read yaml (optional)\n    if isinstance(data, (str, Path)):\n        with open(data, errors='ignore') as f:\n            data = yaml.safe_load(f)  # dictionary\n\n    # Checks\n    for k in 'train', 'eval', 'nc':\n        assert k in data, emojis(f\"data.yaml '{k}:' field missing ❌\")\n    if 'names' not in data:\n        LOGGER.warning(emojis(\"data.yaml 'names:' field missing ⚠️, assigning default names 'class0', 'class1', etc.\"))\n        data['names'] = [f'class{i}' for i in range(data['nc'])]  # default names\n\n    # Resolve paths\n    path = Path(extract_dir or data.get('path') or '')  # optional 'path' default to '.'\n    if not path.is_absolute():\n        path = (ROOT / path).resolve()\n    for k in 'train', 'eval', 'test':\n        if data.get(k):  # prepend path\n            data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]]\n\n    # Parse yaml\n    train, val, test, s = (data.get(x) for x in ('train', 'eval', 'test', 'download'))\n    if val:\n        val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # eval path\n        if not all(x.exists() for x in val):\n            LOGGER.info(emojis('\\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()]))\n            if not s or not autodownload:\n                raise Exception(emojis('Dataset not found ❌'))\n            t = time.time()\n            root = path.parent if 'path' in data else '..'  # unzip directory i.e. '../'\n            if s.startswith('http') and s.endswith('.zip'):  # URL\n                f = Path(s).name  # filename\n                LOGGER.info(f'Downloading {s} to {f}...')\n                torch.hub.download_url_to_file(s, f)\n                Path(root).mkdir(parents=True, exist_ok=True)  # create root\n                ZipFile(f).extractall(path=root)  # unzip\n                Path(f).unlink()  # remove zip\n                r = None  # success\n            elif s.startswith('bash '):  # bash script\n                LOGGER.info(f'Running {s} ...')\n                r = os.system(s)\n            else:  # python script\n                r = exec(s, {'yaml': data})  # return None\n            dt = f'({round(time.time() - t, 1)}s)'\n            s = f\"success ✅ {dt}, saved to {colorstr('bold', root)}\" if r in (0, None) else f\"failure {dt} ❌\"\n            LOGGER.info(emojis(f\"Dataset download {s}\"))\n    check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True)  # download fonts\n    return data  # dictionary\n\n\ndef check_amp(model):\n    # Check PyTorch Automatic Mixed Precision (AMP) functionality. 
Return True on correct operation\n    from models.common import AutoShape, DetectMultiBackend\n\n    def amp_allclose(model, im):\n        # All close FP32 vs AMP results\n        m = AutoShape(model, verbose=False)  # model\n        a = m(im).xywhn[0]  # FP32 inference\n        m.amp = True\n        b = m(im).xywhn[0]  # AMP inference\n        return a.shape == b.shape and torch.allclose(a, b, atol=0.1)  # close to 10% absolute tolerance\n\n    prefix = colorstr('AMP: ')\n    device = next(model.parameters()).device  # get model device\n    if device.type == 'cpu':\n        return False  # AMP disabled on CPU\n    f = ROOT / 'data' / 'images' / 'bus.jpg'  # image to check\n    im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3))\n    try:\n        assert amp_allclose(model, im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im)\n        LOGGER.info(emojis(f'{prefix}checks passed ✅'))\n        return True\n    except Exception:\n        help_url = 'https://github.com/ultralytics/yolov5/issues/7908'\n        LOGGER.warning(emojis(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. See {help_url}'))\n        return False\n\n\ndef url2file(url):\n    # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt\n    url = str(Path(url)).replace(':/', '://')  # Pathlib turns :// -> :/\n    return Path(urllib.parse.unquote(url)).name.split('?')[0]  # '%2F' to '/', split https://url.com/file.txt?auth\n\n\ndef download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):\n    # Multi-threaded file download and unzip function, used in data.yaml for autodownload\n    def download_one(url, dir):\n        # Download 1 file\n        success = True\n        f = dir / Path(url).name  # filename\n        if Path(url).is_file():  # exists in current path\n            Path(url).rename(f)  # move to dir\n        elif not f.exists():\n            LOGGER.info(f'Downloading {url} to {f}...')\n            for i in range(retry + 1):\n                if curl:\n                    s = 'sS' if threads > 1 else ''  # silent\n                    r = os.system(f'curl -{s}L \"{url}\" -o \"{f}\" --retry 9 -C -')  # curl download with retry, continue\n                    success = r == 0\n                else:\n                    torch.hub.download_url_to_file(url, f, progress=threads == 1)  # torch download\n                    success = f.is_file()\n                if success:\n                    break\n                elif i < retry:\n                    LOGGER.warning(f'Download failure, retrying {i + 1}/{retry} {url}...')\n                else:\n                    LOGGER.warning(f'Failed to download {url}...')\n\n        if unzip and success and f.suffix in ('.zip', '.gz'):\n            LOGGER.info(f'Unzipping {f}...')\n            if f.suffix == '.zip':\n                ZipFile(f).extractall(path=dir)  # unzip\n            elif f.suffix == '.gz':\n                os.system(f'tar xfz {f} --directory {f.parent}')  # unzip\n            if delete:\n                f.unlink()  # remove zip\n\n    dir = Path(dir)\n    dir.mkdir(parents=True, exist_ok=True)  # make directory\n    if threads > 1:\n        pool = ThreadPool(threads)\n        pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))  # multi-threaded\n        pool.close()\n        pool.join()\n    else:\n        for u in [url] if isinstance(url, (str, Path)) else url:\n            download_one(u, dir)\n\n\ndef make_divisible(x, divisor):\n  
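  # note: math.ceil rounds up, e.g. make_divisible(33, 32) == 64\n  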
  # Returns nearest x divisible by divisor\n    if isinstance(divisor, torch.Tensor):\n        divisor = int(divisor.max())  # to int\n    return math.ceil(x / divisor) * divisor\n\n\ndef clean_str(s):\n    # Cleans a string by replacing special characters with underscore _\n    return re.sub(pattern=\"[|@#!¡·$€%&()=?¿^*;:,¨´><+]\", repl=\"_\", string=s)\n\n\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\n    # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf\n    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1\n\n\ndef colorstr(*input):\n    # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e.  colorstr('blue', 'hello world')\n    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string\n    colors = {\n        'black': '\\033[30m',  # basic colors\n        'red': '\\033[31m',\n        'green': '\\033[32m',\n        'yellow': '\\033[33m',\n        'blue': '\\033[34m',\n        'magenta': '\\033[35m',\n        'cyan': '\\033[36m',\n        'white': '\\033[37m',\n        'bright_black': '\\033[90m',  # bright colors\n        'bright_red': '\\033[91m',\n        'bright_green': '\\033[92m',\n        'bright_yellow': '\\033[93m',\n        'bright_blue': '\\033[94m',\n        'bright_magenta': '\\033[95m',\n        'bright_cyan': '\\033[96m',\n        'bright_white': '\\033[97m',\n        'end': '\\033[0m',  # misc\n        'bold': '\\033[1m',\n        'underline': '\\033[4m'}\n    return ''.join(colors[x] for x in args) + f'{string}' + colors['end']\n\n\ndef labels_to_class_weights(labels, nc=80):\n    # Get class weights (inverse frequency) from training labels\n    if labels[0] is None:  # no labels loaded\n        return torch.Tensor()\n\n    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO\n    classes = labels[:, 0].astype(int)  # labels = [class xywh]\n    weights = np.bincount(classes, minlength=nc)  # occurrences per class\n\n    # Prepend gridpoint count (for uCE training)\n    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image\n    # weights = np.hstack([gpi * len(labels)  - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start\n\n    weights[weights == 0] = 1  # replace empty bins with 1\n    weights = 1 / weights  # number of targets per class\n    weights /= weights.sum()  # normalize\n    return torch.from_numpy(weights).float()\n\n\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\n    # Produces image weights based on class_weights and image contents\n    # Usage: index = random.choices(range(n), weights=image_weights, k=1)  # weighted image sample\n    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])\n    return (class_weights.reshape(1, nc) * class_counts).sum(1)\n\n\ndef coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)\n    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/\n    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\\n')\n    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\\n')\n    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco\n    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet\n    return [\n        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,\n        35, 36, 
37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n        64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]\n\n\ndef xyxy2xywh(x):\n    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center\n    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center\n    y[:, 2] = x[:, 2] - x[:, 0]  # width\n    y[:, 3] = x[:, 3] - x[:, 1]  # height\n    return y\n\n\ndef xywh2xyxy(x):\n    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x\n    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y\n    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x\n    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y\n    return y\n\n\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\n    # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n    y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw  # top left x\n    y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh  # top left y\n    y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw  # bottom right x\n    y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh  # bottom right y\n    return y\n\n\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\n    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right\n    if clip:\n        clip_coords(x, (h - eps, w - eps))  # warning: inplace clip\n    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n    y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w  # x center\n    y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h  # y center\n    y[:, 2] = (x[:, 2] - x[:, 0]) / w  # width\n    y[:, 3] = (x[:, 3] - x[:, 1]) / h  # height\n    return y\n\n\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\n    # Convert normalized segments into pixel segments, shape (n,2)\n    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n    y[:, 0] = w * x[:, 0] + padw  # top left x\n    y[:, 1] = h * x[:, 1] + padh  # top left y\n    return y\n\n\ndef segment2box(segment, width=640, height=640):\n    # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)\n    x, y = segment.T  # segment xy\n    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)\n    x, y, = x[inside], y[inside]\n    return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4))  # xyxy\n\n\ndef segments2boxes(segments):\n    # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) 
to (cls, xywh)\n    boxes = []\n    for s in segments:\n        x, y = s.T  # segment xy\n        boxes.append([x.min(), y.min(), x.max(), y.max()])  # cls, xyxy\n    return xyxy2xywh(np.array(boxes))  # cls, xywh\n\n\ndef resample_segments(segments, n=1000):\n    # Up-sample an (n,2) segment\n    for i, s in enumerate(segments):\n        s = np.concatenate((s, s[0:1, :]), axis=0)\n        x = np.linspace(0, len(s) - 1, n)\n        xp = np.arange(len(s))\n        segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T  # segment xy\n    return segments\n\n\ndef scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n    # Rescale coords (xyxy) from img1_shape to img0_shape\n    if ratio_pad is None:  # calculate from img0_shape\n        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain  = old / new\n        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding\n    else:\n        gain = ratio_pad[0][0]\n        pad = ratio_pad[1]\n\n    coords[:, [0, 2]] -= pad[0]  # x padding\n    coords[:, [1, 3]] -= pad[1]  # y padding\n    coords[:, :4] /= gain\n    clip_coords(coords, img0_shape)\n    return coords\n\n\ndef clip_coords(boxes, shape):\n    # Clip bounding xyxy bounding boxes to image shape (height, width)\n    if isinstance(boxes, torch.Tensor):  # faster individually\n        boxes[:, 0].clamp_(0, shape[1])  # x1\n        boxes[:, 1].clamp_(0, shape[0])  # y1\n        boxes[:, 2].clamp_(0, shape[1])  # x2\n        boxes[:, 3].clamp_(0, shape[0])  # y2\n    else:  # np.array (faster grouped)\n        boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1])  # x1, x2\n        boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0])  # y1, y2\n\n\ndef non_max_suppression(\n        prediction,\n        conf_thres=0.25,\n        iou_thres=0.45,\n        classes=None,\n        agnostic=False,\n        multi_label=False,\n        labels=(),\n        max_det=300\n):\n    \"\"\"Non-Maximum Suppression (NMS) on inference results to reject overlapping bounding boxes\n\n    Returns:\n         list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n    \"\"\"\n\n    bs = prediction.shape[0]  # batch size\n    nc = prediction.shape[2] - 5  # number of classes\n    xc = prediction[..., 4] > conf_thres  # candidates\n\n    # Checks\n    assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n    assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n    # Settings\n    # min_wh = 2  # (pixels) minimum box width and height\n    max_wh = 7680  # (pixels) maximum box width and height\n    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()\n    time_limit = 0.3 + 0.03 * bs  # seconds to quit after\n    redundant = True  # require redundant detections\n    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)\n    merge = False  # use merge-NMS\n\n    t = time.time()\n    output = [torch.zeros((0, 6), device=prediction.device)] * bs\n    for xi, x in enumerate(prediction):  # image index, image inference\n        # Apply constraints\n        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height\n        x = x[xc[xi]]  # confidence\n\n        # Cat apriori labels if autolabelling\n        if labels and len(labels[xi]):\n            lb = labels[xi]\n            v = torch.zeros((len(lb), nc + 5), device=x.device)\n         
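   # turn prior labels (cls, xywh) into pseudo-detections: box, conf=1.0, one-hot class\n         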
   v[:, :4] = lb[:, 1:5]  # box\n            v[:, 4] = 1.0  # conf\n            v[range(len(lb)), lb[:, 0].long() + 5] = 1.0  # cls\n            x = torch.cat((x, v), 0)\n\n        # If none remain process next image\n        if not x.shape[0]:\n            continue\n\n        # Compute conf\n        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf\n\n        # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n        box = xywh2xyxy(x[:, :4])\n\n        # Detections matrix nx6 (xyxy, conf, cls)\n        if multi_label:\n            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n        else:  # best class only\n            conf, j = x[:, 5:].max(1, keepdim=True)\n            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n        # Filter by class\n        if classes is not None:\n            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n        # Apply finite constraint\n        # if not torch.isfinite(x).all():\n        #     x = x[torch.isfinite(x).all(1)]\n\n        # Check shape\n        n = x.shape[0]  # number of boxes\n        if not n:  # no boxes\n            continue\n        elif n > max_nms:  # excess boxes\n            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence\n\n        # Batched NMS\n        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes\n        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores\n        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS\n        if i.shape[0] > max_det:  # limit detections\n            i = i[:max_det]\n        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)\n            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix\n            weights = iou * scores[None]  # box weights\n            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes\n            if redundant:\n                i = i[iou.sum(1) > 1]  # require redundancy\n\n        output[xi] = x[i]\n        if (time.time() - t) > time_limit:\n            LOGGER.warning(f'WARNING: NMS time limit {time_limit:.3f}s exceeded')\n            break  # time limit exceeded\n\n    return output\n\n\ndef strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_optimizer()\n    # Strip optimizer from 'f' to finalize training, optionally save as 's'\n    x = torch.load(f, map_location=torch.device('cpu'))\n    if x.get('ema'):\n        x['model'] = x['ema']  # replace model with ema\n    for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates':  # keys\n        x[k] = None\n    x['epoch'] = -1\n    x['model'].half()  # to FP16\n    for p in x['model'].parameters():\n        p.requires_grad = False\n    torch.save(x, s or f)\n    mb = os.path.getsize(s or f) / 1E6  # filesize\n    LOGGER.info(f\"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB\")\n\n\ndef print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):\n    evolve_csv = save_dir / 'evolve.csv'\n    evolve_yaml = save_dir / 'hyp_evolve.yaml'\n    keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'eval/box_loss',\n            'eval/obj_loss', 'eval/cls_loss') + tuple(hyp.keys())  # [results + hyps]\n    keys = tuple(x.strip() for x in keys)\n    vals = results + 
tuple(hyp.values())\n    n = len(keys)\n\n    # Download (optional)\n    if bucket:\n        url = f'gs://{bucket}/evolve.csv'\n        if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0):\n            os.system(f'gsutil cp {url} {save_dir}')  # download evolve.csv if larger than local\n\n    # Log to evolve.csv\n    s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\\n')  # add header\n    with open(evolve_csv, 'a') as f:\n        f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\\n')\n\n    # Save yaml\n    with open(evolve_yaml, 'w') as f:\n        data = pd.read_csv(evolve_csv)\n        data = data.rename(columns=lambda x: x.strip())  # strip keys\n        i = np.argmax(fitness(data.values[:, :4]))  #\n        generations = len(data)\n        f.write(\n            '# YOLOv5 Hyperparameter Evolution Results\\n' + f'# Best generation: {i}\\n' +\n            f'# Last generation: {generations - 1}\\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) +\n            '\\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\\n\\n'\n        )\n        yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False)\n\n    # Print to screen\n    LOGGER.info(\n        prefix + f'{generations} generations finished, current result:\\n' + prefix +\n        ', '.join(f'{x.strip():>20s}' for x in keys) + '\\n' + prefix + ', '.join(\n            f'{x:20.5g}'\n            for x in vals\n        ) + '\\n\\n'\n    )\n\n    if bucket:\n        os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}')  # upload\n\n\ndef apply_classifier(x, model, img, im0):\n    # Apply a second stage classifier to YOLO outputs\n    # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()\n    im0 = [im0] if isinstance(im0, np.ndarray) else im0\n    for i, d in enumerate(x):  # per image\n        if d is not None and len(d):\n            d = d.clone()\n\n            # Reshape and pad cutouts\n            b = xyxy2xywh(d[:, :4])  # boxes\n            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square\n            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad\n            d[:, :4] = xywh2xyxy(b).long()\n\n            # Rescale boxes from img_size to im0 size\n            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)\n\n            # Classes\n            pred_cls1 = d[:, 5].long()\n            ims = []\n            for a in d:\n                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]\n                im = cv2.resize(cutout, (224, 224))  # BGR\n\n                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416\n                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32\n                im /= 255  # 0 - 255 to 0.0 - 1.0\n                ims.append(im)\n\n            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction\n            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections\n\n    return x\n\n\ndef increment_path(path, exist_ok=False, sep='', mkdir=False):\n    # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... 
etc.\n    path = Path(path)  # os-agnostic\n    if path.exists() and not exist_ok:\n        path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')\n\n        # Method 1\n        for n in range(2, 9999):\n            p = f'{path}{sep}{n}{suffix}'  # increment path\n            if not os.path.exists(p):  #\n                break\n        path = Path(p)\n\n        # Method 2 (deprecated)\n        # dirs = glob.glob(f\"{path}{sep}*\")  # similar paths\n        # matches = [re.search(rf\"{path.stem}{sep}(\\d+)\", d) for d in dirs]\n        # i = [int(m.groups()[0]) for m in matches if m]  # indices\n        # n = max(i) + 1 if i else 2  # increment number\n        # path = Path(f\"{path}{sep}{n}{suffix}\")  # increment path\n\n    if mkdir:\n        path.mkdir(parents=True, exist_ok=True)  # make directory\n\n    return path\n\n\n# OpenCV Chinese-friendly functions ------------------------------------------------------------------------------------\nimshow_ = cv2.imshow  # copy to avoid recursion errors\n\n\ndef imread(path, flags=cv2.IMREAD_COLOR):\n    return cv2.imdecode(np.fromfile(path, np.uint8), flags)\n\n\ndef imwrite(path, im):\n    try:\n        cv2.imencode(Path(path).suffix, im)[1].tofile(path)\n        return True\n    except Exception:\n        return False\n\n\ndef imshow(path, im):\n    imshow_(path.encode('unicode_escape').decode(), im)\n\n\ncv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow  # redefine\n\n# Variables ------------------------------------------------------------------------------------------------------------\nNCOLS = 0 if is_docker() else shutil.get_terminal_size().columns  # terminal window size for tqdm\n"
  },
  {
    "path": "module/detect/utils/google_app_engine/Dockerfile",
    "content": "FROM gcr.io/google-appengine/python\n\n# Create a virtualenv for dependencies. This isolates these packages from\n# system-level packages.\n# Use -p python3 or -p python3.7 to select python version. Default is version 2.\nRUN virtualenv /env -p python3\n\n# Setting these environment variables are the same as running\n# source /env/bin/activate.\nENV VIRTUAL_ENV /env\nENV PATH /env/bin:$PATH\n\nRUN apt-get update && apt-get install -y python-opencv\n\n# Copy the application's requirements.txt and run pip to install all\n# dependencies into the virtualenv.\nADD requirements.txt /app/requirements.txt\nRUN pip install -r /app/requirements.txt\n\n# Add the application source code.\nADD . /app\n\n# Run a WSGI server to serve the application. gunicorn must be declared as\n# a dependency in requirements.txt.\nCMD gunicorn -b :$PORT main:app\n"
  },
  {
    "path": "module/detect/utils/google_app_engine/additional_requirements.txt",
    "content": "# add these requirements in your app on top of the existing ones\npip==21.1\nFlask==1.0.2\ngunicorn==19.9.0\n"
  },
  {
    "path": "module/detect/utils/google_app_engine/app.yaml",
    "content": "runtime: custom\nenv: flex\n\nservice: yolov5app\n\nliveness_check:\n  initial_delay_sec: 600\n\nmanual_scaling:\n  instances: 1\nresources:\n  cpu: 1\n  memory_gb: 4\n  disk_size_gb: 20\n"
  },
  {
    "path": "module/detect/utils/loggers/__init__.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nLogging utils\n\"\"\"\n\nimport os\nimport warnings\n\nimport pkg_resources as pkg\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\nfrom utils.general import colorstr, cv2, emojis\nfrom utils.loggers.wandb.wandb_utils import WandbLogger\nfrom utils.plots import plot_images, plot_results\nfrom utils.torch_utils import de_parallel\n\nLOGGERS = ('csv', 'tb', 'wandb')  # text-file, TensorBoard, Weights & Biases\nRANK = int(os.getenv('RANK', -1))\n\ntry:\n    import wandb\n\n    assert hasattr(wandb, '__version__')  # verify package import not local dir\n    if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}:\n        try:\n            wandb_login_success = wandb.login(timeout=30)\n        except wandb.errors.UsageError:  # known non-TTY terminal issue\n            wandb_login_success = False\n        if not wandb_login_success:\n            wandb = None\nexcept (ImportError, AssertionError):\n    wandb = None\n\n\nclass Loggers():\n    # YOLOv5 Loggers class\n    def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):\n        self.save_dir = save_dir\n        self.weights = weights\n        self.opt = opt\n        self.hyp = hyp\n        self.logger = logger  # for printing results to console\n        self.include = include\n        self.keys = [\n            'train/box_loss',\n            'train/obj_loss',\n            'train/cls_loss',  # train loss\n            'metrics/precision',\n            'metrics/recall',\n            'metrics/mAP_0.5',\n            'metrics/mAP_0.5:0.95',  # metrics\n            'eval/box_loss',\n            'eval/obj_loss',\n            'eval/cls_loss',  # eval loss\n            'x/lr0',\n            'x/lr1',\n            'x/lr2']  # params\n        self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95']\n        for k in LOGGERS:\n            setattr(self, k, None)  # init empty logger dictionary\n        self.csv = True  # always log to csv\n\n        # Message\n        if not wandb:\n            prefix = colorstr('Weights & Biases: ')\n            s = f\"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)\"\n            self.logger.info(emojis(s))\n\n        # TensorBoard\n        s = self.save_dir\n        if 'tb' in self.include and not self.opt.evolve:\n            prefix = colorstr('TensorBoard: ')\n            self.logger.info(f\"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/\")\n            self.tb = SummaryWriter(str(s))\n\n        # W&B\n        if wandb and 'wandb' in self.include:\n            wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')\n            run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None\n            self.opt.hyp = self.hyp  # add hyperparameters\n            self.wandb = WandbLogger(self.opt, run_id)\n            # temp warn. because nested artifacts not supported after 0.12.10\n            if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'):\n                self.logger.warning(\n                    \"YOLOv5 temporarily requires wandb version 0.12.10 or below. 
Some features may not work as expected.\"\n                )\n        else:\n            self.wandb = None\n\n    def on_train_start(self):\n        # Callback runs on train start\n        pass\n\n    def on_pretrain_routine_end(self):\n        # Callback runs on pre-train routine end\n        paths = self.save_dir.glob('*labels*.jpg')  # training labels\n        if self.wandb:\n            self.wandb.log({\"Labels\": [wandb.Image(str(x), caption=x.name) for x in paths]})\n\n    def on_train_batch_end(self, ni, model, imgs, targets, paths, plots):\n        # Callback runs on train batch end\n        if plots:\n            if ni == 0:\n                if self.tb and not self.opt.sync_bn:  # --sync known issue https://github.com/ultralytics/yolov5/issues/3754\n                    with warnings.catch_warnings():\n                        warnings.simplefilter('ignore')  # suppress jit trace warning\n                        self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])\n            if ni < 3:\n                f = self.save_dir / f'train_batch{ni}.jpg'  # filename\n                plot_images(imgs, targets, paths, f)\n            if self.wandb and ni == 10:\n                files = sorted(self.save_dir.glob('train*.jpg'))\n                self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})\n\n    def on_train_epoch_end(self, epoch):\n        # Callback runs on train epoch end\n        if self.wandb:\n            self.wandb.current_epoch = epoch + 1\n\n    def on_val_image_end(self, pred, predn, path, names, im):\n        # Callback runs on eval image end\n        if self.wandb:\n            self.wandb.val_one_image(pred, predn, path, names, im)\n\n    def on_val_end(self):\n        # Callback runs on eval end\n        if self.wandb:\n            files = sorted(self.save_dir.glob('eval*.jpg'))\n            self.wandb.log({\"Validation\": [wandb.Image(str(f), caption=f.name) for f in files]})\n\n    def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):\n        # Callback runs at the end of each fit (train+eval) epoch\n        x = dict(zip(self.keys, vals))\n        if self.csv:\n            file = self.save_dir / 'results.csv'\n            n = len(x) + 1  # number of cols\n            s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\\n')  # add header\n            with open(file, 'a') as f:\n                f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\\n')\n\n        if self.tb:\n            for k, v in x.items():\n                self.tb.add_scalar(k, v, epoch)\n\n        if self.wandb:\n            if best_fitness == fi:\n                best_results = [epoch] + vals[3:7]\n                for i, name in enumerate(self.best_keys):\n                    self.wandb.wandb_run.summary[name] = best_results[i]  # log best results in the summary\n            self.wandb.log(x)\n            self.wandb.end_epoch(best_result=best_fitness == fi)\n\n    def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):\n        # Callback runs on model save event\n        if self.wandb:\n            if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:\n                self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)\n\n    def on_train_end(self, last, best, plots, epoch, results):\n        # Callback runs on training end\n        if plots:\n            
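# plot_results reads the accumulated results.csv and renders results.png\n            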
plot_results(file=self.save_dir / 'results.csv')  # save results.png\n        files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]\n        files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()]  # filter\n        self.logger.info(f\"Results saved to {colorstr('bold', self.save_dir)}\")\n\n        if self.tb:\n            for f in files:\n                self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')\n\n        if self.wandb:\n            self.wandb.log(dict(zip(self.keys[3:10], results)))\n            self.wandb.log({\"Results\": [wandb.Image(str(f), caption=f.name) for f in files]})\n            # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model\n            if not self.opt.evolve:\n                wandb.log_artifact(\n                    str(best if best.exists() else last),\n                    type='model',\n                    name=f'run_{self.wandb.wandb_run.id}_model',\n                    aliases=['latest', 'best', 'stripped']\n                    )\n            self.wandb.finish_run()\n\n    def on_params_update(self, params):\n        # Update hyperparams or configs of the experiment\n        # params: A dict containing {param: value} pairs\n        if self.wandb:\n            self.wandb.wandb_run.config.update(params, allow_val_change=True)\n"
  },
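The `on_fit_epoch_end` callback above is the only place `results.csv` is written: a header row is emitted once, then one `'%20.5g'`-formatted row is appended per epoch. A minimal sketch of that pattern with hypothetical inputs (`append_results_row` is an illustrative name, not part of the repo):

```python
# Sketch of the CSV logging pattern used by Loggers.on_fit_epoch_end:
# the header is written only when the file does not exist yet.
from pathlib import Path

def append_results_row(save_dir, keys, epoch, vals):
    file = Path(save_dir) / 'results.csv'
    n = len(vals) + 1  # number of columns, including the epoch column
    header = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n')
    with open(file, 'a') as f:
        f.write(header + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')

append_results_row('runs/exp', ['train/box_loss', 'metrics/mAP_0.5'], 0, [0.05, 0.61])
```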
  {
    "path": "module/detect/utils/loggers/wandb/README.md",
    "content": "📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. UPDATED 29 September 2021.\n\n- [About Weights & Biases](#about-weights-&-biases)\n- [First-Time Setup](#first-time-setup)\n- [Viewing runs](#viewing-runs)\n- [Disabling wandb](#disabling-wandb)\n- [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage)\n- [Reports: Share your work with the world!](#reports)\n\n## About Weights & Biases\n\nThink of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With\na few lines of code, save everything you need to debug, compare and reproduce your models — architecture,\nhyperparameters, git commits, model weights, GPU usage, and even datasets and predictions.\n\nUsed by top researchers including teams at OpenAI, Lyft, Github, and MILA, W&B is part of the new standard of best\npractices for machine learning. How W&B can help you optimize your machine learning workflows:\n\n- [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2)\n  model performance in real time\n- [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4)\n  visualized automatically\n- [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI)\n  for powerful, extensible visualization\n- [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8)\n  interactively with collaborators\n- [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently\n- [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models\n\n## First-Time Setup\n\n<details open>\n <summary> Toggle Details </summary>\nWhen you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device.\n\nW&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be\nprovided a unique run **name** within that project as project/name. You can also manually set your project and run name\nas:\n\n```shell\n$ python trainfd.py --project ... --name ...\n```\n\nYOLOv5 notebook\nexample: <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n<img width=\"960\" alt=\"Screen Shot 2021-09-29 at 10 23 13 PM\" src=\"https://user-images.githubusercontent.com/26833433/135392431-1ab7920a-c49d-450a-b0b0-0c86ec86100e.png\">\n\n</details>\n\n## Viewing Runs\n\n<details open>\n  <summary> Toggle Details </summary>\nRun information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in <b>realtime</b> . 
All important information is logged:\n\n- Training & Validation losses\n- Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95\n- Learning Rate over time\n- A bounding box debugging panel, showing the training progress over time\n- GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage**\n- System: Disk I/O, CPU utilization, RAM memory usage\n- Your trained model as W&B Artifact\n- Environment: OS and Python types, Git repository and state, **training command**\n\n<p align=\"center\"><img width=\"900\" alt=\"Weights & Biases dashboard\" src=\"https://user-images.githubusercontent.com/26833433/135390767-c28b050f-8455-4004-adb0-3b730386e2b2.png\"></p>\n</details>\n\n## Disabling wandb\n\n- Running `wandb disabled` inside the training directory disables logging; subsequent training creates no wandb run\n  ![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png)\n\n- To enable wandb again, run `wandb online`\n  ![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png)\n\n## Advanced Usage\n\nYou can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training\nevaluations. Here are some quick examples to get you started.\n\n<details open>\n <h3> 1: TrainFD and Log Evaluation simultaneously </h3>\n   This is an extension of the previous section, but it will also start training after uploading the dataset. <b>This also logs the evaluation Table.</b>\n   The evaluation Table compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets,\n   so no images will be uploaded from your system more than once.\n <details open>\n  <summary> <b>Usage</b> </summary>\n   <b>Code</b> <code> $ python trainfd.py --upload_dataset eval</code>\n\n![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png)\n\n</details>\n\n<h3>2: Visualize and Version Datasets</h3>\nLog, visualize, dynamically query, and understand your data with <a href='https://docs.wandb.ai/guides/data-vis/tables'>\nW&B Tables</a>. You can use the following command to log your dataset as a W&B Table. This will generate a <code>\n{dataset}_wandb.yaml</code> file which can be used to train from the dataset artifact.\n <details>\n  <summary> <b>Usage</b> </summary>\n   <b>Code</b> <code> $ python utils/loggers/wandb/log_dataset.py --project ... --name ... --data .. </code>\n\n![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)\n\n</details>\n\n<h3> 3: TrainFD using dataset artifact </h3>\n   When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that\n   can be used to train a model directly from the dataset artifact. <b> This also logs evaluation </b>\n <details>\n  <summary> <b>Usage</b> </summary>\n   <b>Code</b> <code> $ python trainfd.py --data {data}_wandb.yaml </code>\n\n![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)\n\n</details>\n\n<h3> 4: Save model checkpoints as artifacts </h3>\n  To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` is the checkpoint interval in epochs.\n  You can also log both the dataset and model checkpoints simultaneously. 
If not passed, only the final model will be logged.\n\n<details>\n  <summary> <b>Usage</b> </summary>\n   <b>Code</b> <code> $ python trainfd.py --save_period 1 </code>\n\n![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png)\n\n</details>\n\n</details>\n\n<h3> 5: Resume runs from checkpoint artifacts. </h3>\nAny run can be resumed using artifacts if the <code>--resume</code> argument starts with the <code>wandb-artifact://</code> prefix followed by the run path, i.e., <code>wandb-artifact://username/project/runid</code>. This doesn't require the model checkpoint to be present on the local system.\n\n<details>\n  <summary> <b>Usage</b> </summary>\n   <b>Code</b> <code> $ python trainfd.py --resume wandb-artifact://{run_path} </code>\n\n![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)\n\n</details>\n\n<h3> 6: Resume runs from dataset artifact & checkpoint artifacts. </h3>\n <b> Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device. </b>\n The syntax is the same as the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e., either set <code>--upload_dataset</code> or\n train from a <code>_wandb.yaml</code> file, and also set <code>--save_period</code>.\n\n<details>\n  <summary> <b>Usage</b> </summary>\n   <b>Code</b> <code> $ python trainfd.py --resume wandb-artifact://{run_path} </code>\n\n![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)\n\n</details>\n\n</details>\n\n<h3> Reports </h3>\nW&B Reports can be created from your saved runs for sharing online. Once a report is created, you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)).\n\n<img width=\"900\" alt=\"Weights & Biases Reports\" src=\"https://user-images.githubusercontent.com/26833433/135394029-a17eaf86-c6c1-4b1d-bb80-b90e83aaffa7.png\">\n\n## Environments\n\nYOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies\nincluding [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn),\n[Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n\n- **Google Colab and Kaggle** notebooks with free\n  GPU: <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n- **Google Cloud** Deep Learning VM.\n  See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n- **Amazon** Deep Learning AMI. 
See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n- **Docker Image**.\n  See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n\n## Status\n\n![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n\nIf this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous\nIntegration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5\ntraining ([trainfd.py](https://github.com/ultralytics/yolov5/blob/master/train.py)),\nvalidation ([eval.py](https://github.com/ultralytics/yolov5/blob/master/val.py)),\ninference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and\nexport ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24\nhours and on every commit.\n"
  },
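For readers wiring up the resume flow described in the README above, here is a small sketch of how a `wandb-artifact://` run path breaks down into entity, project, and run id. It mirrors `get_run_info()` in `wandb_utils.py` further below; `parse_run_path` is a hypothetical stand-in name.

```python
# Parse a wandb-artifact:// resume path into its components
# (mirrors get_run_info() in wandb_utils.py below).
from pathlib import Path

WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'

def parse_run_path(resume):
    run_path = Path(resume[len(WANDB_ARTIFACT_PREFIX):])
    entity, project, run_id = run_path.parent.parent.stem, run_path.parent.stem, run_path.stem
    return entity, project, run_id, f'run_{run_id}_model'

print(parse_run_path('wandb-artifact://username/project/runid'))
# -> ('username', 'project', 'runid', 'run_runid_model')
```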
  {
    "path": "module/detect/utils/loggers/wandb/__init__.py",
    "content": ""
  },
  {
    "path": "module/detect/utils/loggers/wandb/log_dataset.py",
    "content": "import argparse\n\nfrom wandb_utils import WandbLogger\n\nfrom utils.general import LOGGER\n\nWANDB_ARTIFACT_PREFIX = 'wandb-artifact://'\n\n\ndef create_dataset_artifact(opt):\n    logger = WandbLogger(opt, None, job_type='Dataset Creation')  # TODO: return value unused\n    if not logger.wandb:\n        LOGGER.info(\"install wandb using `pip install wandb` to log the dataset\")\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')\n    parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')\n    parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project')\n    parser.add_argument('--entity', default=None, help='W&B entity')\n    parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run')\n\n    opt = parser.parse_args()\n    opt.resume = False  # Explicitly disallow resume check for dataset upload job\n\n    create_dataset_artifact(opt)\n"
  },
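`log_dataset.py` is a thin CLI wrapper; it can also be driven programmatically by building the same namespace `argparse` would produce. A sketch under one assumption: the script imports its sibling `wandb_utils` directly, so its directory must be on `sys.path` first.

```python
# Hypothetical programmatic equivalent of:
#   python log_dataset.py --data data/coco128.yaml --project YOLOv5
import sys
from argparse import Namespace

sys.path.append('module/detect/utils/loggers/wandb')  # path within this repository
from log_dataset import create_dataset_artifact

opt = Namespace(data='data/coco128.yaml', single_cls=False, project='YOLOv5',
                entity=None, name='log dataset', resume=False)  # mirrors the parser defaults
create_dataset_artifact(opt)
```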
  {
    "path": "module/detect/utils/loggers/wandb/sweep.py",
    "content": "import sys\nfrom pathlib import Path\n\nimport wandb\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[3]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\n\nfrom train import parse_opt, train\nfrom utils.callbacks import Callbacks\nfrom utils.general import increment_path\nfrom utils.torch_utils import select_device\n\n\ndef sweep():\n    wandb.init()\n    # Get hyp dict from sweep agent. Copy because train() modifies parameters which confused wandb.\n    hyp_dict = vars(wandb.config).get(\"_items\").copy()\n\n    # Workaround: get necessary opt args\n    opt = parse_opt(known=True)\n    opt.batch_size = hyp_dict.get(\"batch_size\")\n    opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))\n    opt.epochs = hyp_dict.get(\"epochs\")\n    opt.nosave = True\n    opt.data = hyp_dict.get(\"data\")\n    opt.weights = str(opt.weights)\n    opt.cfg = str(opt.cfg)\n    opt.data = str(opt.data)\n    opt.hyp = str(opt.hyp)\n    opt.project = str(opt.project)\n    device = select_device(opt.device, batch_size=opt.batch_size)\n\n    # train\n    train(hyp_dict, opt, device, callbacks=Callbacks())\n\n\nif __name__ == \"__main__\":\n    sweep()\n"
  },
  {
    "path": "module/detect/utils/loggers/wandb/sweep.yaml",
    "content": "# Hyperparameters for training\n# To set range-\n# Provide min and max values as:\n#      parameter:\n#\n#         min: scalar\n#         max: scalar\n# OR\n#\n# Set a specific list of search space-\n#     parameter:\n#         values: [scalar1, scalar2, scalar3...]\n#\n# You can use grid, bayesian and hyperopt search strategy\n# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration\n\nprogram: utils/loggers/wandb/sweep.py\nmethod: random\nmetric:\n  name: metrics/mAP_0.5\n  goal: maximize\n\nparameters:\n  # hyperparameters: set either min, max range or values list\n  data:\n    value: \"data/coco128.yaml\"\n  batch_size:\n    values: [64]\n  epochs:\n    values: [10]\n\n  lr0:\n    distribution: uniform\n    min: 1e-5\n    max: 1e-1\n  lrf:\n    distribution: uniform\n    min: 0.01\n    max: 1.0\n  momentum:\n    distribution: uniform\n    min: 0.6\n    max: 0.98\n  weight_decay:\n    distribution: uniform\n    min: 0.0\n    max: 0.001\n  warmup_epochs:\n    distribution: uniform\n    min: 0.0\n    max: 5.0\n  warmup_momentum:\n    distribution: uniform\n    min: 0.0\n    max: 0.95\n  warmup_bias_lr:\n    distribution: uniform\n    min: 0.0\n    max: 0.2\n  box:\n    distribution: uniform\n    min: 0.02\n    max: 0.2\n  cls:\n    distribution: uniform\n    min: 0.2\n    max: 4.0\n  cls_pw:\n    distribution: uniform\n    min: 0.5\n    max: 2.0\n  obj:\n    distribution: uniform\n    min: 0.2\n    max: 4.0\n  obj_pw:\n    distribution: uniform\n    min: 0.5\n    max: 2.0\n  iou_t:\n    distribution: uniform\n    min: 0.1\n    max: 0.7\n  anchor_t:\n    distribution: uniform\n    min: 2.0\n    max: 8.0\n  fl_gamma:\n    distribution: uniform\n    min: 0.0\n    max: 4.0\n  hsv_h:\n    distribution: uniform\n    min: 0.0\n    max: 0.1\n  hsv_s:\n    distribution: uniform\n    min: 0.0\n    max: 0.9\n  hsv_v:\n    distribution: uniform\n    min: 0.0\n    max: 0.9\n  degrees:\n    distribution: uniform\n    min: 0.0\n    max: 45.0\n  translate:\n    distribution: uniform\n    min: 0.0\n    max: 0.9\n  scale:\n    distribution: uniform\n    min: 0.0\n    max: 0.9\n  shear:\n    distribution: uniform\n    min: 0.0\n    max: 10.0\n  perspective:\n    distribution: uniform\n    min: 0.0\n    max: 0.001\n  flipud:\n    distribution: uniform\n    min: 0.0\n    max: 1.0\n  fliplr:\n    distribution: uniform\n    min: 0.0\n    max: 1.0\n  mosaic:\n    distribution: uniform\n    min: 0.0\n    max: 1.0\n  mixup:\n    distribution: uniform\n    min: 0.0\n    max: 1.0\n  copy_paste:\n    distribution: uniform\n    min: 0.0\n    max: 1.0\n"
  },
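One way to launch this sweep without the CLI (`wandb sweep sweep.yaml` followed by `wandb agent <id>`) is through the wandb Python API. A minimal sketch, assuming wandb is installed and you are logged in:

```python
# Load the sweep definition and start an agent; each trial runs the
# `program` entry (utils/loggers/wandb/sweep.py) with sampled hyperparameters.
import yaml
import wandb

with open('utils/loggers/wandb/sweep.yaml', errors='ignore') as f:
    sweep_config = yaml.safe_load(f)

sweep_id = wandb.sweep(sweep_config, project='YOLOv5')
wandb.agent(sweep_id, count=10)  # run 10 trials on this machine
```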
  {
    "path": "module/detect/utils/loggers/wandb/wandb_utils.py",
    "content": "\"\"\"Utilities and tools for tracking runs with Weights & Biases.\"\"\"\n\nimport logging\nimport os\nimport sys\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import Dict\n\nimport yaml\nfrom tqdm import tqdm\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[3]  # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n    sys.path.append(str(ROOT))  # add ROOT to PATH\n\nfrom utils.dataloaders import LoadImagesAndLabels, img2label_paths\nfrom utils.general import LOGGER, check_dataset, check_file\n\ntry:\n    import wandb\n\n    assert hasattr(wandb, '__version__')  # verify package import not local dir\nexcept (ImportError, AssertionError):\n    wandb = None\n\nRANK = int(os.getenv('RANK', -1))\nWANDB_ARTIFACT_PREFIX = 'wandb-artifact://'\n\n\ndef remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):\n    return from_string[len(prefix):]\n\n\ndef check_wandb_config_file(data_config_file):\n    wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1))  # updated data.yaml path\n    if Path(wandb_config).is_file():\n        return wandb_config\n    return data_config_file\n\n\ndef check_wandb_dataset(data_file):\n    is_trainset_wandb_artifact = False\n    is_valset_wandb_artifact = False\n    if check_file(data_file) and data_file.endswith('.yaml'):\n        with open(data_file, errors='ignore') as f:\n            data_dict = yaml.safe_load(f)\n        is_trainset_wandb_artifact = isinstance(\n            data_dict['train'],\n            str\n            ) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX)\n        is_valset_wandb_artifact = isinstance(\n            data_dict['eval'],\n            str\n            ) and data_dict['eval'].startswith(WANDB_ARTIFACT_PREFIX)\n    if is_trainset_wandb_artifact or is_valset_wandb_artifact:\n        return data_dict\n    else:\n        return check_dataset(data_file)\n\n\ndef get_run_info(run_path):\n    run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX))\n    run_id = run_path.stem\n    project = run_path.parent.stem\n    entity = run_path.parent.parent.stem\n    model_artifact_name = 'run_' + run_id + '_model'\n    return entity, project, run_id, model_artifact_name\n\n\ndef check_wandb_resume(opt):\n    process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None\n    if isinstance(opt.resume, str):\n        if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):\n            if RANK not in [-1, 0]:  # For resuming DDP runs\n                entity, project, run_id, model_artifact_name = get_run_info(opt.resume)\n                api = wandb.Api()\n                artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest')\n                modeldir = artifact.download()\n                opt.weights = str(Path(modeldir) / \"last.pt\")\n            return True\n    return None\n\n\ndef process_wandb_config_ddp_mode(opt):\n    with open(check_file(opt.data), errors='ignore') as f:\n        data_dict = yaml.safe_load(f)  # data dict\n    train_dir, val_dir = None, None\n    if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):\n        api = wandb.Api()\n        train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias)\n        train_dir = train_artifact.download()\n        train_path = Path(train_dir) / 'data/images/'\n        data_dict['train'] = str(train_path)\n\n    if isinstance(data_dict['eval'], str) and data_dict['eval'].startswith(WANDB_ARTIFACT_PREFIX):\n  
      api = wandb.Api()\n        val_artifact = api.artifact(remove_prefix(data_dict['eval']) + ':' + opt.artifact_alias)\n        val_dir = val_artifact.download()\n        val_path = Path(val_dir) / 'data/images/'\n        data_dict['eval'] = str(val_path)\n    if train_dir or val_dir:\n        ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml')\n        with open(ddp_data_path, 'w') as f:\n            yaml.safe_dump(data_dict, f)\n        opt.data = ddp_data_path\n\n\nclass WandbLogger():\n    \"\"\"Log training runs, datasets, models, and predictions to Weights & Biases.\n\n    This logger sends information to W&B at wandb.ai. By default, this information\n    includes hyperparameters, system configuration and metrics, model metrics,\n    and basic data metrics and analyses.\n\n    By providing additional command line arguments to train.py, datasets,\n    models and predictions can also be logged.\n\n    For more on how this logger is used, see the Weights & Biases documentation:\n    https://docs.wandb.com/guides/integrations/yolov5\n    \"\"\"\n\n    def __init__(self, opt, run_id=None, job_type='Training'):\n        \"\"\"\n        - Initialize WandbLogger instance\n        - Upload dataset if opt.upload_dataset is True\n        - Set up training processes if job_type is 'Training'\n\n        arguments:\n        opt (namespace) -- Commandline arguments for this run\n        run_id (str) -- Run ID of W&B run to be resumed\n        job_type (str) -- To set the job_type for this run\n\n        \"\"\"\n        # Pre-training routine --\n        self.job_type = job_type\n        self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run\n        self.val_artifact, self.train_artifact = None, None\n        self.train_artifact_path, self.val_artifact_path = None, None\n        self.result_artifact = None\n        self.val_table, self.result_table = None, None\n        self.bbox_media_panel_images = []\n        self.val_table_path_map = None\n        self.max_imgs_to_log = 16\n        self.wandb_artifact_data_dict = None\n        self.data_dict = None\n        # It's more elegant to stick to 1 wandb.init call,\n        #  but useful config data is overwritten in the WandbLogger's wandb.init call\n        if isinstance(opt.resume, str):  # checks resume from artifact\n            if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):\n                entity, project, run_id, model_artifact_name = get_run_info(opt.resume)\n                model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name\n                assert wandb, 'install wandb to resume wandb runs'\n                # Resume wandb-artifact:// runs here; workaround for not overwriting wandb.config\n                self.wandb_run = wandb.init(\n                    id=run_id,\n                    project=project,\n                    entity=entity,\n                    resume='allow',\n                    allow_val_change=True\n                    )\n                opt.resume = model_artifact_name\n        elif self.wandb:\n            self.wandb_run = wandb.init(\n                config=opt,\n                resume=\"allow\",\n                project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,\n                entity=opt.entity,\n                name=opt.name if opt.name != 'exp' else None,\n                job_type=job_type,\n                id=run_id,\n                allow_val_change=True\n                ) if not wandb.run else wandb.run\n        if self.wandb_run:\n            if 
self.job_type == 'Training':\n                if opt.upload_dataset:\n                    if not opt.resume:\n                        self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt)\n\n                if opt.resume:\n                    # resume from artifact\n                    if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX):\n                        self.data_dict = dict(self.wandb_run.config.data_dict)\n                    else:  # local resume\n                        self.data_dict = check_wandb_dataset(opt.data)\n                else:\n                    self.data_dict = check_wandb_dataset(opt.data)\n                    self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict\n\n                    # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming.\n                    self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True)\n                self.setup_training(opt)\n\n            if self.job_type == 'Dataset Creation':\n                self.wandb_run.config.update({\"upload_dataset\": True})\n                self.data_dict = self.check_and_upload_dataset(opt)\n\n    def check_and_upload_dataset(self, opt):\n        \"\"\"\n        Check if the dataset format is compatible and upload it as a W&B artifact\n\n        arguments:\n        opt (namespace) -- Commandline arguments for current run\n\n        returns:\n        Updated dataset info dictionary where local dataset paths are replaced by WANDB_ARTIFACT_PREFIX links.\n        \"\"\"\n        assert wandb, 'Install wandb to upload dataset'\n        config_path = self.log_dataset_artifact(\n            opt.data, opt.single_cls,\n            'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem\n            )\n        with open(config_path, errors='ignore') as f:\n            wandb_data_dict = yaml.safe_load(f)\n        return wandb_data_dict\n\n    def setup_training(self, opt):\n        \"\"\"\n        Set up the necessary processes for training YOLO models:\n          - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX\n          - Update data_dict to contain info of the previous run if resumed, and the paths of the dataset artifact if downloaded\n          - Set up log_dict and initialize bbox_interval\n\n        arguments:\n        opt (namespace) -- commandline arguments for this run\n\n        \"\"\"\n        self.log_dict, self.current_epoch = {}, 0\n        self.bbox_interval = opt.bbox_interval\n        if isinstance(opt.resume, str):\n            modeldir, _ = self.download_model_artifact(opt)\n            if modeldir:\n                self.weights = Path(modeldir) / \"last.pt\"\n                config = self.wandb_run.config\n                opt.weights = str(self.weights)\n                opt.save_period, opt.batch_size, opt.bbox_interval = config.save_period, config.batch_size, config.bbox_interval\n                opt.epochs, opt.hyp, opt.imgsz = config.epochs, config.hyp, config.imgsz\n        data_dict = self.data_dict\n        if self.val_artifact is None:  # If --upload_dataset is set, use the existing artifact, don't download\n            self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(\n                data_dict.get('train'), opt.artifact_alias\n
            )\n            self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(\n                data_dict.get('eval'), opt.artifact_alias\n            )\n\n        if self.train_artifact_path is not None:\n            train_path = Path(self.train_artifact_path) / 'data/images/'\n            data_dict['train'] = str(train_path)\n        if self.val_artifact_path is not None:\n            val_path = Path(self.val_artifact_path) / 'data/images/'\n            data_dict['eval'] = str(val_path)\n\n        if self.val_artifact is not None:\n            self.result_artifact = wandb.Artifact(\"run_\" + wandb.run.id + \"_progress\", \"evaluation\")\n            columns = [\"epoch\", \"id\", \"ground truth\", \"prediction\"]\n            columns.extend(self.data_dict['names'])\n            self.result_table = wandb.Table(columns)\n            self.val_table = self.val_artifact.get(\"eval\")\n            if self.val_table_path_map is None:\n                self.map_val_table_path()\n        if opt.bbox_interval == -1:\n            self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1\n            if opt.evolve or opt.noplots:\n                self.bbox_interval = opt.bbox_interval = opt.epochs + 1  # disable bbox_interval\n        train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None\n        # Update the data_dict to point to the local artifacts dir\n        if train_from_artifact:\n            self.data_dict = data_dict\n\n    def download_dataset_artifact(self, path, alias):\n        \"\"\"\n        Download the dataset artifact if the path starts with WANDB_ARTIFACT_PREFIX\n\n        arguments:\n        path -- path of the dataset to be used for training\n        alias (str) -- alias of the artifact to be downloaded/used for training\n\n        returns:\n        (str, wandb.Artifact) -- path of the downloaded dataset and its corresponding artifact object if the dataset\n        is found, otherwise returns (None, None)\n        \"\"\"\n        if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):\n            artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + \":\" + alias)\n            dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace(\"\\\\\", \"/\"))\n            assert dataset_artifact is not None, 'Error: W&B dataset artifact doesn\\'t exist'\n            datadir = dataset_artifact.download()\n            return datadir, dataset_artifact\n        return None, None\n\n    def download_model_artifact(self, opt):\n        \"\"\"\n        Download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX\n\n        arguments:\n        opt (namespace) -- Commandline arguments for this run\n        \"\"\"\n        if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):\n            model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + \":latest\")\n            assert model_artifact is not None, 'Error: W&B model artifact doesn\\'t exist'\n            modeldir = model_artifact.download()\n            # epochs_trained = model_artifact.metadata.get('epochs_trained')\n            total_epochs = model_artifact.metadata.get('total_epochs')\n            is_finished = total_epochs is None\n            assert not is_finished, 'training is finished, can only resume incomplete runs.'\n            return modeldir, model_artifact\n        return None, None\n\n    def log_model(self, path, opt, epoch, fitness_score, best_model=False):\n        \"\"\"\n        Log the model checkpoint as a W&B artifact\n\n        arguments:\n        path (Path)   -- Path of directory containing the checkpoints\n        opt (namespace) -- Command line arguments for this run\n        epoch (int)  -- Current epoch number\n        fitness_score (float) -- fitness score for current epoch\n        best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.\n        \"\"\"\n        model_artifact = wandb.Artifact(\n            'run_' + wandb.run.id + '_model',\n            type='model',\n            metadata={\n                'original_url': str(path),\n                'epochs_trained': epoch + 1,\n                'save period': opt.save_period,\n                'project': opt.project,\n                'total_epochs': opt.epochs,\n                'fitness_score': fitness_score}\n            )\n        model_artifact.add_file(str(path / 'last.pt'), name='last.pt')\n        wandb.log_artifact(\n            model_artifact,\n            aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']\n            )\n        LOGGER.info(f\"Saving model artifact on epoch {epoch + 1}\")\n\n    def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):\n        \"\"\"\n        Log the dataset as a W&B artifact and return the new data file with W&B links\n\n        arguments:\n        data_file (str) -- the .yaml file with information about the dataset like path, classes, etc.\n        single_cls (boolean) -- train multi-class data as single-class\n        project (str) -- project name. Used to construct the artifact path\n        overwrite_config (boolean) -- overwrites the data.yaml file if set to true, otherwise creates a new\n        file with a _wandb postfix, e.g. data_wandb.yaml\n\n        returns:\n        the new .yaml file with artifact links. 
it can be used to start training directly from artifacts\n        \"\"\"\n        upload_dataset = self.wandb_run.config.upload_dataset\n        log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'eval'\n        self.data_dict = check_dataset(data_file)  # parse and check\n        data = dict(self.data_dict)\n        nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])\n        names = {k: v for k, v in enumerate(names)}  # to index dictionary\n\n        # log train set\n        if not log_val_only:\n            self.train_artifact = self.create_dataset_table(\n                LoadImagesAndLabels(data['train'], rect=True, batch_size=1),\n                names,\n                name='train'\n                ) if data.get('train') else None\n            if data.get('train'):\n                data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')\n\n        self.val_artifact = self.create_dataset_table(\n            LoadImagesAndLabels(data['eval'], rect=True, batch_size=1), names, name='eval'\n        ) if data.get('eval') else None\n        if data.get('eval'):\n            data['eval'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'eval')\n\n        path = Path(data_file)\n        # create a _wandb.yaml file with artifacts links if both train and test set are logged\n        if not log_val_only:\n            path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml'  # updated data.yaml path\n            path = ROOT / 'data' / path\n            data.pop('download', None)\n            data.pop('path', None)\n            with open(path, 'w') as f:\n                yaml.safe_dump(data, f)\n                LOGGER.info(f\"Created dataset config file {path}\")\n\n        if self.job_type == 'Training':  # builds correct artifact pipeline graph\n            if not log_val_only:\n                self.wandb_run.log_artifact(\n                    self.train_artifact\n                )  # calling use_artifact downloads the dataset. 
NOT NEEDED!\n            self.wandb_run.use_artifact(self.val_artifact)\n            self.val_artifact.wait()\n            self.val_table = self.val_artifact.get('eval')\n            self.map_val_table_path()\n        else:\n            self.wandb_run.log_artifact(self.train_artifact)\n            self.wandb_run.log_artifact(self.val_artifact)\n        return path\n\n    def map_val_table_path(self):\n        \"\"\"\n        Map the validation dataset Table: file name -> its id in the W&B Table.\n        Useful for referencing artifacts for evaluation.\n        \"\"\"\n        self.val_table_path_map = {}\n        LOGGER.info(\"Mapping dataset\")\n        for i, data in enumerate(tqdm(self.val_table.data)):\n            self.val_table_path_map[data[3]] = data[0]\n\n    def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'):\n        \"\"\"\n        Create and return a W&B artifact containing a W&B Table of the dataset.\n\n        arguments:\n        dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table\n        class_to_id -- hash map that maps class ids to labels\n        name -- name of the artifact\n\n        returns:\n        dataset artifact to be logged or used\n        \"\"\"\n        # TODO: Explore multiprocessing to split this loop in parallel; this is essential for speeding up the logging\n        artifact = wandb.Artifact(name=name, type=\"dataset\")\n        img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None\n        img_files = tqdm(dataset.im_files) if not img_files else img_files\n        for img_file in img_files:\n            if Path(img_file).is_dir():\n                artifact.add_dir(img_file, name='data/images')\n                labels_path = 'labels'.join(dataset.path.rsplit('images', 1))\n                artifact.add_dir(labels_path, name='data/labels')\n            else:\n                artifact.add_file(img_file, name='data/images/' + Path(img_file).name)\n                label_file = Path(img2label_paths([img_file])[0])\n                artifact.add_file(\n                    str(label_file), name='data/labels/' +\n                                          label_file.name\n                    ) if label_file.exists() else None\n        table = wandb.Table(columns=[\"id\", \"train_image\", \"Classes\", \"name\"])\n        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])\n        for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):\n            box_data, img_classes = [], {}\n            for cls, *xywh in labels[:, 1:].tolist():\n                cls = int(cls)\n                box_data.append(\n                    {\n                        \"position\": {\n                            \"middle\": [xywh[0], xywh[1]],\n                            \"width\": xywh[2],\n                            \"height\": xywh[3]},\n                        \"class_id\": cls,\n                        \"box_caption\": \"%s\" % (class_to_id[cls])}\n                )\n                img_classes[cls] = class_to_id[cls]\n            boxes = {\"ground_truth\": {\"box_data\": box_data, \"class_labels\": class_to_id}}  # inference-space\n            table.add_data(\n                si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()),\n                Path(paths).name\n                )\n        artifact.add(table, name)\n        return 
artifact\n\n    def log_training_progress(self, predn, path, names):\n        \"\"\"\n        Build evaluation Table. Uses reference from validation dataset table.\n\n        arguments:\n        predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]\n        path (str): local path of the current evaluation image\n        names (dict(int, str)): hash map that maps class ids to labels\n        \"\"\"\n        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])\n        box_data = []\n        avg_conf_per_class = [0] * len(self.data_dict['names'])\n        pred_class_count = {}\n        for *xyxy, conf, cls in predn.tolist():\n            if conf >= 0.25:\n                cls = int(cls)\n                box_data.append(\n                    {\n                        \"position\": {\n                            \"minX\": xyxy[0],\n                            \"minY\": xyxy[1],\n                            \"maxX\": xyxy[2],\n                            \"maxY\": xyxy[3]},\n                        \"class_id\": cls,\n                        \"box_caption\": f\"{names[cls]} {conf:.3f}\",\n                        \"scores\": {\n                            \"class_score\": conf},\n                        \"domain\": \"pixel\"}\n                )\n                avg_conf_per_class[cls] += conf\n\n                if cls in pred_class_count:\n                    pred_class_count[cls] += 1\n                else:\n                    pred_class_count[cls] = 1\n\n        for pred_class in pred_class_count.keys():\n            avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class]\n\n        boxes = {\"predictions\": {\"box_data\": box_data, \"class_labels\": names}}  # inference-space\n        id = self.val_table_path_map[Path(path).name]\n        self.result_table.add_data(\n            self.current_epoch, id, self.val_table.data[id][1],\n            wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),\n            *avg_conf_per_class\n            )\n\n    def val_one_image(self, pred, predn, path, names, im):\n        \"\"\"\n        Log validation data for one image. 
Updates the result Table if the validation dataset is uploaded, and logs the bbox media panel.\n\n        arguments:\n        pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]\n        predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]\n        path (Path): local path of the current evaluation image\n        names (dict(int, str)): hash map that maps class ids to labels\n        im: the image being evaluated\n        \"\"\"\n        if self.val_table and self.result_table:  # Log Table if Val dataset is uploaded as artifact\n            self.log_training_progress(predn, path, names)\n\n        if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0:\n            if self.current_epoch % self.bbox_interval == 0:\n                box_data = [{\n                    \"position\": {\n                        \"minX\": xyxy[0],\n                        \"minY\": xyxy[1],\n                        \"maxX\": xyxy[2],\n                        \"maxY\": xyxy[3]},\n                    \"class_id\": int(cls),\n                    \"box_caption\": f\"{names[int(cls)]} {conf:.3f}\",\n                    \"scores\": {\n                        \"class_score\": conf},\n                    \"domain\": \"pixel\"} for *xyxy, conf, cls in pred.tolist()]\n                boxes = {\"predictions\": {\"box_data\": box_data, \"class_labels\": names}}  # inference-space\n                self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name))\n\n    def log(self, log_dict):\n        \"\"\"\n        Save the metrics to the logging dictionary\n\n        arguments:\n        log_dict (Dict) -- metrics/media to be logged in current step\n        \"\"\"\n        if self.wandb_run:\n            for key, value in log_dict.items():\n                self.log_dict[key] = value\n\n    def end_epoch(self, best_result=False):\n        \"\"\"\n        Commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.\n\n        arguments:\n        best_result (boolean): Boolean representing if the result of this evaluation is best or not\n        \"\"\"\n        if self.wandb_run:\n            with all_logging_disabled():\n                if self.bbox_media_panel_images:\n                    self.log_dict[\"BoundingBoxDebugger\"] = self.bbox_media_panel_images\n                try:\n                    wandb.log(self.log_dict)\n                except BaseException as e:\n                    LOGGER.info(\n                        f\"An error occurred in wandb logger. The training will proceed without interruption. 
More info\\n{e}\"\n                    )\n                    self.wandb_run.finish()\n                    self.wandb_run = None\n\n                self.log_dict = {}\n                self.bbox_media_panel_images = []\n            if self.result_artifact:\n                self.result_artifact.add(self.result_table, 'result')\n                wandb.log_artifact(\n                    self.result_artifact,\n                    aliases=[\n                        'latest', 'last', 'epoch ' + str(self.current_epoch),\n                        ('best' if best_result else '')]\n                    )\n\n                wandb.log({\"evaluation\": self.result_table})\n                columns = [\"epoch\", \"id\", \"ground truth\", \"prediction\"]\n                columns.extend(self.data_dict['names'])\n                self.result_table = wandb.Table(columns)\n                self.result_artifact = wandb.Artifact(\"run_\" + wandb.run.id + \"_progress\", \"evaluation\")\n\n    def finish_run(self):\n        \"\"\"\n        Log metrics if any and finish the current W&B run\n        \"\"\"\n        if self.wandb_run:\n            if self.log_dict:\n                with all_logging_disabled():\n                    wandb.log(self.log_dict)\n            wandb.run.finish()\n\n\n@contextmanager\ndef all_logging_disabled(highest_level=logging.CRITICAL):\n    \"\"\" source - https://gist.github.com/simon-weber/7853144\n    A context manager that will prevent any logging messages triggered during the body from being processed.\n    :param highest_level: the maximum logging level in use.\n      This would only need to be changed if a custom level greater than CRITICAL is defined.\n    \"\"\"\n    previous_level = logging.root.manager.disable\n    logging.disable(highest_level)\n    try:\n        yield\n    finally:\n        logging.disable(previous_level)\n"
  },
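The `all_logging_disabled()` context manager at the end of `wandb_utils.py` is useful on its own. A quick usage sketch; the import path is an assumption based on this repository's layout:

```python
# Records emitted inside the block are suppressed up to CRITICAL,
# then the previous logging state is restored on exit.
import logging

from utils.loggers.wandb.wandb_utils import all_logging_disabled  # assumed import path

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

with all_logging_disabled():
    log.error('this message is swallowed')
log.info('logging works again here')
```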
  {
    "path": "module/detect/utils/loss.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nLoss functions\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.metrics import bbox_iou\nfrom utils.torch_utils import de_parallel\n\n\ndef smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441\n    # return positive, negative label smoothing BCE targets\n    return 1.0 - 0.5 * eps, 0.5 * eps\n\n\nclass BCEBlurWithLogitsLoss(nn.Module):\n    # BCEwithLogitLoss() with reduced missing label effects.\n    def __init__(self, alpha=0.05):\n        super().__init__()\n        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()\n        self.alpha = alpha\n\n    def forward(self, pred, true):\n        loss = self.loss_fcn(pred, true)\n        pred = torch.sigmoid(pred)  # prob from logits\n        dx = pred - true  # reduce only missing label effects\n        # dx = (pred - true).abs()  # reduce missing label and false label effects\n        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))\n        loss *= alpha_factor\n        return loss.mean()\n\n\nclass FocalLoss(nn.Module):\n    # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)\n    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):\n        super().__init__()\n        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()\n        self.gamma = gamma\n        self.alpha = alpha\n        self.reduction = loss_fcn.reduction\n        self.loss_fcn.reduction = 'none'  # required to apply FL to each element\n\n    def forward(self, pred, true):\n        loss = self.loss_fcn(pred, true)\n        # p_t = torch.exp(-loss)\n        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability\n\n        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py\n        pred_prob = torch.sigmoid(pred)  # prob from logits\n        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)\n        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)\n        modulating_factor = (1.0 - p_t) ** self.gamma\n        loss *= alpha_factor * modulating_factor\n\n        if self.reduction == 'mean':\n            return loss.mean()\n        elif self.reduction == 'sum':\n            return loss.sum()\n        else:  # 'none'\n            return loss\n\n\nclass QFocalLoss(nn.Module):\n    # Wraps Quality focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)\n    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):\n        super().__init__()\n        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()\n        self.gamma = gamma\n        self.alpha = alpha\n        self.reduction = loss_fcn.reduction\n        self.loss_fcn.reduction = 'none'  # required to apply FL to each element\n\n    def forward(self, pred, true):\n        loss = self.loss_fcn(pred, true)\n\n        pred_prob = torch.sigmoid(pred)  # prob from logits\n        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)\n        modulating_factor = torch.abs(true - pred_prob) ** self.gamma\n        loss *= alpha_factor * modulating_factor\n\n        if self.reduction == 'mean':\n            return loss.mean()\n        elif self.reduction == 'sum':\n            return loss.sum()\n        else:  # 'none'\n            return loss\n\n\nclass ComputeLoss:\n    sort_obj_iou = False\n\n    # Compute losses\n    def __init__(self, model, autobalance=False):\n        device = next(model.parameters()).device  # get model device\n        h = model.hyp  # hyperparameters\n\n        # Define criteria\n        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))\n        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))\n\n        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3\n        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets\n\n        # Focal loss\n        g = h['fl_gamma']  # focal loss gamma\n        if g > 0:\n            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)\n\n        m = de_parallel(model).model[-1]  # Detect() module\n        self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02])  # P3-P7\n        self.ssi = list(m.stride).index(16) if autobalance else 0  # stride 16 index\n        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance\n        self.na = m.na  # number of anchors\n        self.nc = m.nc  # number of classes\n        self.nl = m.nl  # number of layers\n        self.anchors = m.anchors\n        self.device = device\n\n    def __call__(self, p, targets):  # predictions, targets\n        lcls = torch.zeros(1, device=self.device)  # class loss\n        lbox = torch.zeros(1, device=self.device)  # box loss\n        lobj = torch.zeros(1, device=self.device)  # object loss\n        tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets\n\n        # Losses\n        for i, pi in enumerate(p):  # layer index, layer predictions\n            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx\n            tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device)  # target obj\n\n            n = b.shape[0]  # number of targets\n            if n:\n                # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1)  # faster, requires torch 1.8.0\n                pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1)  # target-subset of predictions\n\n                # Regression\n                pxy = pxy.sigmoid() * 2 - 0.5\n                pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]\n                pbox = torch.cat((pxy, pwh), 1)  # predicted box\n                iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze()  # iou(prediction, target)\n                lbox += (1.0 - iou).mean()  # iou loss\n\n                # 
Objectness\n                iou = iou.detach().clamp(0).type(tobj.dtype)\n                if self.sort_obj_iou:\n                    j = iou.argsort()\n                    b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j]\n                if self.gr < 1:\n                    iou = (1.0 - self.gr) + self.gr * iou\n                tobj[b, a, gj, gi] = iou  # iou ratio\n\n                # Classification\n                if self.nc > 1:  # cls loss (only if multiple classes)\n                    t = torch.full_like(pcls, self.cn, device=self.device)  # targets\n                    t[range(n), tcls[i]] = self.cp\n                    lcls += self.BCEcls(pcls, t)  # BCE\n\n                # Append targets to text file\n                # with open('targets.txt', 'a') as file:\n                #     [file.write('%11.5g ' * 4 % tuple(x) + '\\n') for x in torch.cat((txy[i], twh[i]), 1)]\n\n            obji = self.BCEobj(pi[..., 4], tobj)\n            lobj += obji * self.balance[i]  # obj loss\n            if self.autobalance:\n                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()\n\n        if self.autobalance:\n            self.balance = [x / self.balance[self.ssi] for x in self.balance]\n        lbox *= self.hyp['box']\n        lobj *= self.hyp['obj']\n        lcls *= self.hyp['cls']\n        bs = tobj.shape[0]  # batch size\n\n        return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()\n\n    def build_targets(self, p, targets):\n        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)\n        na, nt = self.na, targets.shape[0]  # number of anchors, targets\n        tcls, tbox, indices, anch = [], [], [], []\n        gain = torch.ones(7, device=self.device)  # normalized to gridspace gain\n        ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)\n        targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None]), 2)  # append anchor indices\n\n        g = 0.5  # bias\n        off = torch.tensor(\n            [\n                [0, 0],\n                [1, 0],\n                [0, 1],\n                [-1, 0],\n                [0, -1],  # j,k,l,m\n                # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm\n            ],\n            device=self.device).float() * g  # offsets\n\n        for i in range(self.nl):\n            anchors, shape = self.anchors[i], p[i].shape\n            gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]]  # xyxy gain\n\n            # Match targets to anchors\n            t = targets * gain  # shape(3,n,7)\n            if nt:\n                # Matches\n                r = t[..., 4:6] / anchors[:, None]  # wh ratio\n                j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t']  # compare\n                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))\n                t = t[j]  # filter\n\n                # Offsets\n                gxy = t[:, 2:4]  # grid xy\n                gxi = gain[[2, 3]] - gxy  # inverse\n                j, k = ((gxy % 1 < g) & (gxy > 1)).T\n                l, m = ((gxi % 1 < g) & (gxi > 1)).T\n                j = torch.stack((torch.ones_like(j), j, k, l, m))\n                t = t.repeat((5, 1, 1))[j]\n                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]\n            else:\n                t = targets[0]\n                offsets = 0\n\n            # Define\n            bc, gxy, gwh, a = t.chunk(4, 1)  # (image, class), 
grid xy, grid wh, anchors\n            a, (b, c) = a.long().view(-1), bc.long().T  # anchors, image, class\n            gij = (gxy - offsets).long()\n            gi, gj = gij.T  # grid indices\n\n            # Append\n            indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1)))  # image, anchor, grid\n            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box\n            anch.append(anchors[a])  # anchors\n            tcls.append(c)  # class\n\n        return tcls, tbox, indices, anch\n"
  },
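To see the `FocalLoss` wrapper and `smooth_BCE` label-smoothing targets from `loss.py` in isolation, here is a small sketch with random logits; shapes are arbitrary and the import path is assumed from this repository's layout:

```python
import torch
import torch.nn as nn

from utils.loss import FocalLoss, smooth_BCE  # assumed import path

cp, cn = smooth_BCE(eps=0.1)  # positive/negative targets: (0.95, 0.05)
fl = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)  # must wrap BCEWithLogitsLoss

pred = torch.randn(8, 80)        # raw logits
true = torch.full((8, 80), cn)   # label-smoothed negatives
true[:, 0] = cp                  # one smoothed positive class per sample
print(fl(pred, true))            # scalar: the wrapper restores the 'mean' reduction
```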
  {
    "path": "module/detect/utils/metrics.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nModel validation metrics\n\"\"\"\n\nimport math\nimport warnings\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\n\n\ndef fitness(x):\n    # Model fitness as a weighted combination of metrics\n    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]\n    return (x[:, :4] * w).sum(1)\n\n\ndef smooth(y, f=0.05):\n    # Box filter of fraction f\n    nf = round(len(y) * f * 2) // 2 + 1  # number of filter elements (must be odd)\n    p = np.ones(nf // 2)  # ones padding\n    yp = np.concatenate((p * y[0], y, p * y[-1]), 0)  # y padded\n    return np.convolve(yp, np.ones(nf) / nf, mode='valid')  # y-smoothed\n\n\ndef ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16):\n    \"\"\" Compute the average precision, given the recall and precision curves.\n    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.\n    # Arguments\n        tp:  True positives (nparray, nx1 or nx10).\n        conf:  Objectness value from 0-1 (nparray).\n        pred_cls:  Predicted object classes (nparray).\n        target_cls:  True object classes (nparray).\n        plot:  Plot precision-recall curve at mAP@0.5\n        save_dir:  Plot save directory\n    # Returns\n        The average precision as computed in py-faster-rcnn.\n    \"\"\"\n\n    # Sort by objectness\n    i = np.argsort(-conf)\n    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\n\n    # Find unique classes\n    unique_classes, nt = np.unique(target_cls, return_counts=True)\n    nc = unique_classes.shape[0]  # number of classes, number of detections\n\n    # Create Precision-Recall curve and compute AP for each class\n    px, py = np.linspace(0, 1, 1000), []  # for plotting\n    ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))\n    for ci, c in enumerate(unique_classes):\n        i = pred_cls == c\n        n_l = nt[ci]  # number of labels\n        n_p = i.sum()  # number of predictions\n        if n_p == 0 or n_l == 0:\n            continue\n\n        # Accumulate FPs and TPs\n        fpc = (1 - tp[i]).cumsum(0)\n        tpc = tp[i].cumsum(0)\n\n        # Recall\n        recall = tpc / (n_l + eps)  # recall curve\n        r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0)  # negative x, xp because xp decreases\n\n        # Precision\n        precision = tpc / (tpc + fpc)  # precision curve\n        p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1)  # p at pr_score\n\n        # AP from recall-precision curve\n        for j in range(tp.shape[1]):\n            ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])\n            if plot and j == 0:\n                py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5\n\n    # Compute F1 (harmonic mean of precision and recall)\n    f1 = 2 * p * r / (p + r + eps)\n    names = [v for k, v in names.items() if k in unique_classes]  # list: only classes that have data\n    names = dict(enumerate(names))  # to dict\n    if plot:\n        plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)\n        plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')\n        plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')\n        plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')\n\n    i = smooth(f1.mean(0), 0.1).argmax()  # max F1 index\n    p, r, f1 = p[:, i], r[:, i], f1[:, i]\n    tp = 
(r * nt).round()  # true positives\n    fp = (tp / (p + eps) - tp).round()  # false positives\n    return tp, fp, p, r, f1, ap, unique_classes.astype(int)\n\n\ndef compute_ap(recall, precision):\n    \"\"\" Compute the average precision, given the recall and precision curves\n    # Arguments\n        recall:    The recall curve (list)\n        precision: The precision curve (list)\n    # Returns\n        Average precision, precision curve, recall curve\n    \"\"\"\n\n    # Append sentinel values to beginning and end\n    mrec = np.concatenate(([0.0], recall, [1.0]))\n    mpre = np.concatenate(([1.0], precision, [0.0]))\n\n    # Compute the precision envelope\n    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))\n\n    # Integrate area under curve\n    method = 'interp'  # methods: 'continuous', 'interp'\n    if method == 'interp':\n        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)\n        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate\n    else:  # 'continuous'\n        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes\n        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve\n\n    return ap, mpre, mrec\n\n\nclass ConfusionMatrix:\n    # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n    def __init__(self, nc, conf=0.25, iou_thres=0.45):\n        self.matrix = np.zeros((nc + 1, nc + 1))\n        self.nc = nc  # number of classes\n        self.conf = conf\n        self.iou_thres = iou_thres\n\n    def process_batch(self, detections, labels):\n        \"\"\"\n        Update the confusion matrix with one batch of detections and labels.\n        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n        Arguments:\n            detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n            labels (Array[M, 5]), class, x1, y1, x2, y2\n        Returns:\n            None, updates confusion matrix accordingly\n        \"\"\"\n        detections = detections[detections[:, 4] > self.conf]\n        gt_classes = labels[:, 0].int()\n        detection_classes = detections[:, 5].int()\n        iou = box_iou(labels[:, 1:], detections[:, :4])\n\n        x = torch.where(iou > self.iou_thres)\n        if x[0].shape[0]:\n            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n            if x[0].shape[0] > 1:\n                matches = matches[matches[:, 2].argsort()[::-1]]\n                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n                matches = matches[matches[:, 2].argsort()[::-1]]\n                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n        else:\n            matches = np.zeros((0, 3))\n\n        n = matches.shape[0] > 0\n        m0, m1, _ = matches.transpose().astype(int)\n        for i, gc in enumerate(gt_classes):\n            j = m0 == i\n            if n and sum(j) == 1:\n                self.matrix[detection_classes[m1[j]], gc] += 1  # correct\n            else:\n                self.matrix[self.nc, gc] += 1  # background FP\n\n        if n:\n            for i, dc in enumerate(detection_classes):\n                if not any(m1 == i):\n                    self.matrix[dc, self.nc] += 1  # background FN\n\n    def tp_fp(self):\n        tp = self.matrix.diagonal()  # true positives\n        fp = self.matrix.sum(1) - tp  # false positives\n        # fn = self.matrix.sum(0) - tp  # false negatives (missed detections)\n        
return tp[:-1], fp[:-1]  # remove background class\n\n    def plot(self, normalize=True, save_dir='', names=()):\n        try:\n            import seaborn as sn\n\n            array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1)  # normalize columns\n            array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)\n\n            fig = plt.figure(figsize=(12, 9), tight_layout=True)\n            nc, nn = self.nc, len(names)  # number of classes, names\n            sn.set(font_scale=1.0 if nc < 50 else 0.8)  # for label size\n            labels = (0 < nn < 99) and (nn == nc)  # apply names to ticklabels\n            with warnings.catch_warnings():\n                warnings.simplefilter('ignore')  # suppress empty matrix RuntimeWarning: All-NaN slice encountered\n                sn.heatmap(array,\n                           annot=nc < 30,\n                           annot_kws={\n                               \"size\": 8},\n                           cmap='Blues',\n                           fmt='.2f',\n                           square=True,\n                           vmin=0.0,\n                           xticklabels=names + ['background FP'] if labels else \"auto\",\n                           yticklabels=names + ['background FN'] if labels else \"auto\").set_facecolor((1, 1, 1))\n            fig.axes[0].set_xlabel('True')\n            fig.axes[0].set_ylabel('Predicted')\n            fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)\n            plt.close()\n        except Exception as e:\n            print(f'WARNING: ConfusionMatrix plot failure: {e}')\n\n    def print(self):\n        for i in range(self.nc + 1):\n            print(' '.join(map(str, self.matrix[i])))\n\n\ndef bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):\n    # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4)\n\n    # Get the coordinates of bounding boxes\n    if xywh:  # transform from xywh to xyxy\n        (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, 1), box2.chunk(4, 1)\n        w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2\n        b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_\n        b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_\n    else:  # x1, y1, x2, y2 = box1\n        b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1)\n        b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1)\n        w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1\n        w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1\n\n    # Intersection area\n    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \\\n            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)\n\n    # Union Area\n    union = w1 * h1 + w2 * h2 - inter + eps\n\n    # IoU\n    iou = inter / union\n    if CIoU or DIoU or GIoU:\n        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width\n        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height\n        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1\n            c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared\n            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center dist ** 2\n            if CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47\n                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2)\n      
          with torch.no_grad():\n                    alpha = v / (v - iou + (1 + eps))\n                return iou - (rho2 / c2 + v * alpha)  # CIoU\n            return iou - rho2 / c2  # DIoU\n        c_area = cw * ch + eps  # convex area\n        return iou - (c_area - union) / c_area  # GIoU https://arxiv.org/pdf/1902.09630.pdf\n    return iou  # IoU\n\n\ndef box_area(box):\n    # box = xyxy(4,n)\n    return (box[2] - box[0]) * (box[3] - box[1])\n\n\ndef box_iou(box1, box2, eps=1e-7):\n    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n    \"\"\"\n    Return intersection-over-union (Jaccard index) of boxes.\n    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n    Arguments:\n        box1 (Tensor[N, 4])\n        box2 (Tensor[M, 4])\n    Returns:\n        iou (Tensor[N, M]): the NxM matrix containing the pairwise\n            IoU values for every element in boxes1 and boxes2\n    \"\"\"\n\n    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n    (a1, a2), (b1, b2) = box1[:, None].chunk(2, 2), box2.chunk(2, 1)\n    inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)\n\n    # IoU = inter / (area1 + area2 - inter)\n    return inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter + eps)\n\n\ndef bbox_ioa(box1, box2, eps=1e-7):\n    \"\"\" Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2\n    box1:       np.array of shape(4)\n    box2:       np.array of shape(nx4)\n    returns:    np.array of shape(n)\n    \"\"\"\n\n    # Get the coordinates of bounding boxes\n    b1_x1, b1_y1, b1_x2, b1_y2 = box1\n    b2_x1, b2_y1, b2_x2, b2_y2 = box2.T\n\n    # Intersection area\n    inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \\\n                 (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)\n\n    # box2 area\n    box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps\n\n    # Intersection over box2 area\n    return inter_area / box2_area\n\n\ndef wh_iou(wh1, wh2, eps=1e-7):\n    # Returns the nxm IoU matrix. 
wh1 is nx2, wh2 is mx2\n    wh1 = wh1[:, None]  # [N,1,2]\n    wh2 = wh2[None]  # [1,M,2]\n    inter = torch.min(wh1, wh2).prod(2)  # [N,M]\n    return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps)  # iou = inter / (area1 + area2 - inter)\n\n\n# Plots ----------------------------------------------------------------------------------------------------------------\n\n\ndef plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()):\n    # Precision-recall curve\n    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)\n    py = np.stack(py, axis=1)\n\n    if 0 < len(names) < 21:  # display per-class legend if < 21 classes\n        for i, y in enumerate(py.T):\n            ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}')  # plot(recall, precision)\n    else:\n        ax.plot(px, py, linewidth=1, color='grey')  # plot(recall, precision)\n\n    ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())\n    ax.set_xlabel('Recall')\n    ax.set_ylabel('Precision')\n    ax.set_xlim(0, 1)\n    ax.set_ylim(0, 1)\n    plt.legend(bbox_to_anchor=(1.04, 1), loc=\"upper left\")\n    fig.savefig(save_dir, dpi=250)\n    plt.close()\n\n\ndef plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'):\n    # Metric-confidence curve\n    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)\n\n    if 0 < len(names) < 21:  # display per-class legend if < 21 classes\n        for i, y in enumerate(py):\n            ax.plot(px, y, linewidth=1, label=f'{names[i]}')  # plot(confidence, metric)\n    else:\n        ax.plot(px, py.T, linewidth=1, color='grey')  # plot(confidence, metric)\n\n    y = smooth(py.mean(0), 0.05)\n    ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')\n    ax.set_xlabel(xlabel)\n    ax.set_ylabel(ylabel)\n    ax.set_xlim(0, 1)\n    ax.set_ylim(0, 1)\n    plt.legend(bbox_to_anchor=(1.04, 1), loc=\"upper left\")\n    fig.savefig(save_dir, dpi=250)\n    plt.close()\n"
  },
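  {
    "path": "examples/metrics_sketch.py",
    "content": "\"\"\"\nUsage sketch for module/detect/utils/metrics.py (illustrative file, not part of the\noriginal release; the dotted import below is an assumption that the repository root\nis on PYTHONPATH and the module/ directories are importable as packages).\nShows box_iou, bbox_iou and compute_ap on hand-checkable toy inputs.\n\"\"\"\n\nimport numpy as np\nimport torch\n\nfrom module.detect.utils.metrics import bbox_iou, box_iou, compute_ap\n\n# Two overlapping boxes in (x1, y1, x2, y2) pixel coordinates.\na = torch.tensor([[0., 0., 10., 10.]])\nb = torch.tensor([[5., 5., 15., 15.]])\n\n# Pairwise IoU matrix: intersection 25, union 100 + 100 - 25 = 175 -> ~0.143.\nprint(box_iou(a, b))\n\n# Complete IoU between the same pair (xywh=False: inputs are already xyxy).\nprint(bbox_iou(a, b, xywh=False, CIoU=True))\n\n# AP of a toy PR curve: precision 1.0 up to recall 0.5, then falling to 0.\nrecall = np.array([0.0, 0.25, 0.5])\nprecision = np.array([1.0, 1.0, 1.0])\nap, mpre, mrec = compute_ap(recall, precision)\nprint(f'AP = {ap:.3f}')  # ~0.75 with the 101-point COCO interpolation\n"
  },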
  {
    "path": "module/detect/utils/plots.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nPlotting utils\n\"\"\"\n\nimport math\nimport os\nfrom copy import copy\nfrom pathlib import Path\nfrom urllib.error import URLError\n\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sn\nimport torch\nfrom PIL import Image, ImageDraw, ImageFont\nfrom utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords,\n                           increment_path, is_ascii, threaded, try_except, xywh2xyxy, xyxy2xywh)\nfrom utils.metrics import fitness\n\n# Settings\nRANK = int(os.getenv('RANK', -1))\nmatplotlib.rc('font', **{'size': 11})\nmatplotlib.use('Agg')  # for writing to files only\n\n\nclass Colors:\n    # Ultralytics color palette https://ultralytics.com/\n    def __init__(self):\n        # hex = matplotlib.colors.TABLEAU_COLORS.values()\n        hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',\n                '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')\n        self.palette = [self.hex2rgb(f'#{c}') for c in hexs]\n        self.n = len(self.palette)\n\n    def __call__(self, i, bgr=False):\n        c = self.palette[int(i) % self.n]\n        return (c[2], c[1], c[0]) if bgr else c\n\n    @staticmethod\n    def hex2rgb(h):  # rgb order (PIL)\n        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))\n\n\ncolors = Colors()  # create instance for 'from utils.plots import colors'\n\n\ndef check_pil_font(font=FONT, size=10):\n    # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary\n    font = Path(font)\n    font = font if font.exists() else (CONFIG_DIR / font.name)\n    try:\n        return ImageFont.truetype(str(font) if font.exists() else font.name, size)\n    except Exception:  # download if missing\n        try:\n            check_font(font)\n            return ImageFont.truetype(str(font), size)\n        except TypeError:\n            check_requirements('Pillow>=8.4.0')  # known issue https://github.com/ultralytics/yolov5/issues/5374\n        except URLError:  # not online\n            return ImageFont.load_default()\n\n\nclass Annotator:\n    # YOLOv5 Annotator for train/eval mosaics and jpgs and detect/hub inference annotations\n    def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):\n        assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'\n        non_ascii = not is_ascii(example)  # non-latin labels, i.e. 
asian, arabic, cyrillic\n        self.pil = pil or non_ascii\n        if self.pil:  # use PIL\n            self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)\n            self.draw = ImageDraw.Draw(self.im)\n            self.font = check_pil_font(\n                font='Arial.Unicode.ttf' if non_ascii else font,\n                size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)\n                )\n        else:  # use cv2\n            self.im = im\n        self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2)  # line width\n\n    def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n        # Add one xyxy box to image with label\n        if self.pil or not is_ascii(label):\n            self.draw.rectangle(box, width=self.lw, outline=color)  # box\n            if label:\n                w, h = self.font.getsize(label)  # text width, height\n                outside = box[1] - h >= 0  # label fits outside box\n                self.draw.rectangle(\n                    (box[0], box[1] - h if outside else box[1], box[0] + w + 1,\n                     box[1] + 1 if outside else box[1] + h + 1),\n                    fill=color,\n                )\n                # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls')  # for PIL>8.0\n                self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)\n        else:  # cv2\n            p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))\n            cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)\n            if label:\n                tf = max(self.lw - 1, 1)  # font thickness\n                w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0]  # text width, height\n                outside = p1[1] - h >= 3\n                p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3\n                cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA)  # filled\n                cv2.putText(\n                    self.im,\n                    label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),\n                    0,\n                    self.lw / 3,\n                    txt_color,\n                    thickness=tf,\n                    lineType=cv2.LINE_AA\n                    )\n\n    def rectangle(self, xy, fill=None, outline=None, width=1):\n        # Add rectangle to image (PIL-only)\n        self.draw.rectangle(xy, fill, outline, width)\n\n    def text(self, xy, text, txt_color=(255, 255, 255)):\n        # Add text to image (PIL-only)\n        w, h = self.font.getsize(text)  # text width, height\n        self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font)\n\n    def result(self):\n        # Return annotated image as array\n        return np.asarray(self.im)\n\n\ndef feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):\n    \"\"\"\n    x:              Features to be visualized\n    module_type:    Module type\n    stage:          Module stage within model\n    n:              Maximum number of feature maps to plot\n    save_dir:       Directory to save results\n    \"\"\"\n    if 'Detect' not in module_type:\n        batch, channels, height, width = x.shape  # batch, channels, height, width\n        if height > 1 and width > 1:\n            f = save_dir / f\"stage{stage}_{module_type.split('.')[-1]}_features.png\"  # filename\n\n            blocks = 
torch.chunk(x[0].cpu(), channels, dim=0)  # select batch index 0, block by channels\n            n = min(n, channels)  # number of plots\n            fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)  # 8 rows x n/8 cols\n            ax = ax.ravel()\n            plt.subplots_adjust(wspace=0.05, hspace=0.05)\n            for i in range(n):\n                ax[i].imshow(blocks[i].squeeze())  # cmap='gray'\n                ax[i].axis('off')\n\n            LOGGER.info(f'Saving {f}... ({n}/{channels})')\n            plt.savefig(f, dpi=300, bbox_inches='tight')\n            plt.close()\n            np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy())  # npy save\n\n\ndef hist2d(x, y, n=100):\n    # 2d histogram used in labels.png and evolve.png\n    xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)\n    hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))\n    xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)\n    yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)\n    return np.log(hist[xidx, yidx])\n\n\ndef butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):\n    from scipy.signal import butter, filtfilt\n\n    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy\n    def butter_lowpass(cutoff, fs, order):\n        nyq = 0.5 * fs\n        normal_cutoff = cutoff / nyq\n        return butter(order, normal_cutoff, btype='low', analog=False)\n\n    b, a = butter_lowpass(cutoff, fs, order=order)\n    return filtfilt(b, a, data)  # forward-backward filter\n\n\ndef output_to_target(output):\n    # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]\n    targets = []\n    for i, o in enumerate(output):\n        for *box, conf, cls in o.cpu().numpy():\n            targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])\n    return np.array(targets)\n\n\n@threaded\ndef plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16):\n    # Plot image grid with labels\n    if isinstance(images, torch.Tensor):\n        images = images.cpu().float().numpy()\n    if isinstance(targets, torch.Tensor):\n        targets = targets.cpu().numpy()\n    if np.max(images[0]) <= 1:\n        images *= 255  # de-normalise (optional)\n    bs, _, h, w = images.shape  # batch size, _, height, width\n    bs = min(bs, max_subplots)  # limit plot images\n    ns = np.ceil(bs ** 0.5)  # number of subplots (square)\n\n    # Build Image\n    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init\n    for i, im in enumerate(images):\n        if i == max_subplots:  # if last batch has fewer images than we expect\n            break\n        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin\n        im = im.transpose(1, 2, 0)\n        mosaic[y:y + h, x:x + w, :] = im\n\n    # Resize (optional)\n    scale = max_size / ns / max(h, w)\n    if scale < 1:\n        h = math.ceil(scale * h)\n        w = math.ceil(scale * w)\n        mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))\n\n    # Annotate\n    fs = int((h + w) * ns * 0.01)  # font size\n    annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)\n    for i in range(i + 1):\n        x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin\n        annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders\n        if paths:\n            annotator.text((x + 
5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames\n        if len(targets) > 0:\n            ti = targets[targets[:, 0] == i]  # image targets\n            boxes = xywh2xyxy(ti[:, 2:6]).T\n            classes = ti[:, 1].astype('int')\n            labels = ti.shape[1] == 6  # labels if no conf column\n            conf = None if labels else ti[:, 6]  # check for confidence presence (label vs pred)\n\n            if boxes.shape[1]:\n                if boxes.max() <= 1.01:  # if normalized with tolerance 0.01\n                    boxes[[0, 2]] *= w  # scale to pixels\n                    boxes[[1, 3]] *= h\n                elif scale < 1:  # absolute coords need scale if image scales\n                    boxes *= scale\n            boxes[[0, 2]] += x\n            boxes[[1, 3]] += y\n            for j, box in enumerate(boxes.T.tolist()):\n                cls = classes[j]\n                color = colors(cls)\n                cls = names[cls] if names else cls\n                if labels or conf[j] > 0.25:  # 0.25 conf thresh\n                    label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'\n                    annotator.box_label(box, label, color=color)\n    annotator.im.save(fname)  # save\n\n\ndef plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):\n    # Plot LR simulating training for full epochs\n    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals\n    y = []\n    for _ in range(epochs):\n        scheduler.step()\n        y.append(optimizer.param_groups[0]['lr'])\n    plt.plot(y, '.-', label='LR')\n    plt.xlabel('epoch')\n    plt.ylabel('LR')\n    plt.grid()\n    plt.xlim(0, epochs)\n    plt.ylim(0)\n    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)\n    plt.close()\n\n\ndef plot_val_txt():  # from utils.plots import *; plot_val()\n    # Plot eval.txt histograms\n    x = np.loadtxt('eval.txt', dtype=np.float32)\n    box = xyxy2xywh(x[:, :4])\n    cx, cy = box[:, 0], box[:, 1]\n\n    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)\n    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)\n    ax.set_aspect('equal')\n    plt.savefig('hist2d.png', dpi=300)\n\n    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)\n    ax[0].hist(cx, bins=600)\n    ax[1].hist(cy, bins=600)\n    plt.savefig('hist1d.png', dpi=200)\n\n\ndef plot_targets_txt():  # from utils.plots import *; plot_targets_txt()\n    # Plot targets.txt histograms\n    x = np.loadtxt('targets.txt', dtype=np.float32).T\n    s = ['x targets', 'y targets', 'width targets', 'height targets']\n    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)\n    ax = ax.ravel()\n    for i in range(4):\n        ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}')\n        ax[i].legend()\n        ax[i].set_title(s[i])\n    plt.savefig('targets.jpg', dpi=200)\n\n\ndef plot_val_study(file='', dir='', x=None):  # from utils.plots import *; plot_val_study()\n    # Plot file=study.txt generated by eval.py (or plot all study*.txt in dir)\n    save_dir = Path(file).parent if file else Path(dir)\n    plot2 = False  # plot additional results\n    if plot2:\n        ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()\n\n    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)\n    # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:\n    for f in sorted(save_dir.glob('study*.txt')):\n        y = np.loadtxt(f, 
dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T\n        x = np.arange(y.shape[1]) if x is None else np.array(x)\n        if plot2:\n            s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']\n            for i in range(7):\n                ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)\n                ax[i].set_title(s[i])\n\n        j = y[3].argmax() + 1\n        ax2.plot(\n            y[5, 1:j],\n            y[3, 1:j] * 1E2,\n            '.-',\n            linewidth=2,\n            markersize=8,\n            label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')\n            )\n\n    ax2.plot(\n        1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],\n        'k.-',\n        linewidth=2,\n        markersize=8,\n        alpha=.25,\n        label='EfficientDet'\n        )\n\n    ax2.grid(alpha=0.2)\n    ax2.set_yticks(np.arange(20, 60, 5))\n    ax2.set_xlim(0, 57)\n    ax2.set_ylim(25, 55)\n    ax2.set_xlabel('GPU Speed (ms/img)')\n    ax2.set_ylabel('COCO AP eval')\n    ax2.legend(loc='lower right')\n    f = save_dir / 'study.png'\n    print(f'Saving {f}...')\n    plt.savefig(f, dpi=300)\n\n\n@try_except  # known issue https://github.com/ultralytics/yolov5/issues/5395\n@Timeout(30)  # known issue https://github.com/ultralytics/yolov5/issues/5611\ndef plot_labels(labels, names=(), save_dir=Path('')):\n    # plot dataset labels\n    LOGGER.info(f\"Plotting labels to {save_dir / 'labels.jpg'}... \")\n    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes\n    nc = int(c.max() + 1)  # number of classes\n    x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])\n\n    # seaborn correlogram\n    sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))\n    plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)\n    plt.close()\n\n    # matplotlib labels\n    matplotlib.use('svg')  # faster\n    ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()\n    y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)\n    try:  # color histogram bars by class\n        [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)]  # known issue #3195\n    except Exception:\n        pass\n    ax[0].set_ylabel('instances')\n    if 0 < len(names) < 30:\n        ax[0].set_xticks(range(len(names)))\n        ax[0].set_xticklabels(names, rotation=90, fontsize=10)\n    else:\n        ax[0].set_xlabel('classes')\n    sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)\n    sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)\n\n    # rectangles\n    labels[:, 1:3] = 0.5  # center\n    labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000\n    img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)\n    for cls, *box in labels[:1000]:\n        ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls))  # plot\n    ax[1].imshow(img)\n    ax[1].axis('off')\n\n    for a in [0, 1, 2, 3]:\n        for s in ['top', 'right', 'left', 'bottom']:\n            ax[a].spines[s].set_visible(False)\n\n    plt.savefig(save_dir / 'labels.jpg', dpi=200)\n    matplotlib.use('Agg')\n    plt.close()\n\n\ndef plot_evolve(evolve_csv='path/to/evolve.csv'):  # from utils.plots import *; plot_evolve()\n    # Plot evolve.csv hyp evolution results\n    evolve_csv = Path(evolve_csv)\n    data = pd.read_csv(evolve_csv)\n    keys = [x.strip() for x in data.columns]\n 
   x = data.values\n    f = fitness(x)\n    j = np.argmax(f)  # max fitness index\n    plt.figure(figsize=(10, 12), tight_layout=True)\n    matplotlib.rc('font', **{'size': 8})\n    print(f'Best results from row {j} of {evolve_csv}:')\n    for i, k in enumerate(keys[7:]):\n        v = x[:, 7 + i]\n        mu = v[j]  # best single result\n        plt.subplot(6, 5, i + 1)\n        plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')\n        plt.plot(mu, f.max(), 'k+', markersize=15)\n        plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9})  # limit to 40 characters\n        if i % 5 != 0:\n            plt.yticks([])\n        print(f'{k:>15}: {mu:.3g}')\n    f = evolve_csv.with_suffix('.png')  # filename\n    plt.savefig(f, dpi=200)\n    plt.close()\n    print(f'Saved {f}')\n\n\ndef plot_results(file='path/to/results.csv', dir=''):\n    # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')\n    save_dir = Path(file).parent if file else Path(dir)\n    fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)\n    ax = ax.ravel()\n    files = list(save_dir.glob('results*.csv'))\n    assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'\n    for f in files:\n        try:\n            data = pd.read_csv(f)\n            s = [x.strip() for x in data.columns]\n            x = data.values[:, 0]\n            for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):\n                y = data.values[:, j].astype('float')\n                # y[y == 0] = np.nan  # don't show zero values\n                ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8)\n                ax[i].set_title(s[j], fontsize=12)\n                # if j in [8, 9, 10]:  # share train and eval loss y axes\n                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])\n        except Exception as e:\n            LOGGER.info(f'Warning: Plotting error for {f}: {e}')\n    ax[1].legend()\n    fig.savefig(save_dir / 'results.png', dpi=200)\n    plt.close()\n\n\ndef profile_idetection(start=0, stop=0, labels=(), save_dir=''):\n    # Plot iDetection '*.txt' per-image logs. 
from utils.plots import *; profile_idetection()\n    ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()\n    s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']\n    files = list(Path(save_dir).glob('frames*.txt'))\n    for fi, f in enumerate(files):\n        try:\n            results = np.loadtxt(f, ndmin=2).T[:, 90:-30]  # clip first and last rows\n            n = results.shape[1]  # number of rows\n            x = np.arange(start, min(stop, n) if stop else n)\n            results = results[:, x]\n            t = (results[0] - results[0].min())  # set t0=0s\n            results[0] = x\n            for i, a in enumerate(ax):\n                if i < len(results):\n                    label = labels[fi] if len(labels) else f.stem.replace('frames_', '')\n                    a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)\n                    a.set_title(s[i])\n                    a.set_xlabel('time (s)')\n                    # if fi == len(files) - 1:\n                    #     a.set_ylim(bottom=0)\n                    for side in ['top', 'right']:\n                        a.spines[side].set_visible(False)\n                else:\n                    a.remove()\n        except Exception as e:\n            print(f'Warning: Plotting error for {f}; {e}')\n    ax[1].legend()\n    plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)\n\n\ndef save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True):\n    # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop\n    xyxy = torch.tensor(xyxy).view(-1, 4)\n    b = xyxy2xywh(xyxy)  # boxes\n    if square:\n        b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # attempt rectangle to square\n    b[:, 2:] = b[:, 2:] * gain + pad  # box wh * gain + pad\n    xyxy = xywh2xyxy(b).long()\n    clip_coords(xyxy, im.shape)\n    crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]\n    if save:\n        file.parent.mkdir(parents=True, exist_ok=True)  # make directory\n        f = str(increment_path(file).with_suffix('.jpg'))\n        # cv2.imwrite(f, crop)  # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue\n        Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0)  # save RGB\n    return crop\n"
  },
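  {
    "path": "examples/palette_sketch.py",
    "content": "\"\"\"\nStandalone sketch of the Colors palette logic in module/detect/utils/plots.py\n(illustrative file, not part of the original release; re-created here so it runs\nwithout the rest of the utils package).\n\"\"\"\n\n\ndef hex2rgb(h):\n    # '#RRGGBB' -> (r, g, b), as in plots.Colors.hex2rgb\n    return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))\n\n\n# A three-colour excerpt of the Ultralytics palette.\npalette = [hex2rgb(f'#{c}') for c in ('FF3838', '00C2FF', '344593')]\n\n\ndef color(i, bgr=False):\n    # Cycle through the palette by class index, as Colors.__call__ does.\n    c = palette[int(i) % len(palette)]\n    return (c[2], c[1], c[0]) if bgr else c\n\n\nprint(color(0))            # (255, 56, 56) in RGB order (PIL)\nprint(color(0, bgr=True))  # (56, 56, 255) in BGR order (OpenCV)\nprint(color(5))            # index 5 wraps around to palette entry 2\n"
  },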
  {
    "path": "module/detect/utils/torch_utils.py",
    "content": "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nPyTorch utils\n\"\"\"\n\nimport math\nimport os\nimport platform\nimport subprocess\nimport time\nimport warnings\nfrom contextlib import contextmanager\nfrom copy import deepcopy\nfrom pathlib import Path\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom utils.general import LOGGER, check_version, colorstr, file_date, git_describe\n\nLOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html\nRANK = int(os.getenv('RANK', -1))\nWORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))\n\ntry:\n    import thop  # for FLOPs computation\nexcept ImportError:\n    thop = None\n\n# Suppress PyTorch warnings\nwarnings.filterwarnings('ignore', message='User provided device_type of \\'cuda\\', but CUDA is not available. Disabling')\n\n\ndef smart_DDP(model):\n    # Model DDP creation with checks\n    assert not check_version(torch.__version__, '1.12.0', pinned=True), \\\n        'torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. ' \\\n        'Please upgrade or downgrade torch to use DDP. See https://github.com/ultralytics/yolov5/issues/8395'\n    if check_version(torch.__version__, '1.11.0'):\n        return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True)\n    else:\n        return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)\n\n\n@contextmanager\ndef torch_distributed_zero_first(local_rank: int):\n    # Decorator to make all processes in distributed training wait for each local_master to do something\n    if local_rank not in [-1, 0]:\n        dist.barrier(device_ids=[local_rank])\n    yield\n    if local_rank == 0:\n        dist.barrier(device_ids=[0])\n\n\ndef device_count():\n    # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). Supports Linux and Windows\n    assert platform.system() in ('Linux', 'Windows'), 'device_count() only supported on Linux or Windows'\n    try:\n        cmd = 'nvidia-smi -L | wc -l' if platform.system() == 'Linux' else 'nvidia-smi -L | find /c /v \"\"'  # Windows\n        return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1])\n    except Exception:\n        return 0\n\n\ndef select_device(device='', batch_size=0, newline=True):\n    # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n    s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n    device = str(device).strip().lower().replace('cuda:', '').replace('none', '')  # to string, 'cuda:0' to '0'\n    cpu = device == 'cpu'\n    mps = device == 'mps'  # Apple Metal Performance Shaders (MPS)\n    if cpu or mps:\n        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False\n    elif device:  # non-cpu device requested\n        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable - must be before assert is_available()\n        assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n            f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n    if not (cpu or mps) and torch.cuda.is_available():  # prefer GPU if available\n        devices = device.split(',') if device else '0'  # range(torch.cuda.device_count())  # i.e. 
0,1,6,7\n        n = len(devices)  # device count\n        if n > 1 and batch_size > 0:  # check batch_size is divisible by device_count\n            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n        space = ' ' * (len(s) + 1)\n        for i, d in enumerate(devices):\n            p = torch.cuda.get_device_properties(i)\n            s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\"  # bytes to MB\n        arg = 'cuda:0'\n    elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available():  # prefer MPS if available\n        s += 'MPS\\n'\n        arg = 'mps'\n    else:  # revert to CPU\n        s += 'CPU\\n'\n        arg = 'cpu'\n\n    if not newline:\n        s = s.rstrip()\n    LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s)  # emoji-safe\n    return torch.device(arg)\n\n\ndef time_sync():\n    # PyTorch-accurate time\n    if torch.cuda.is_available():\n        torch.cuda.synchronize()\n    return time.time()\n\n\ndef profile(input, ops, n=10, device=None):\n    # YOLOv5 speed/memory/FLOPs profiler\n    #\n    # Usage:\n    #     input = torch.randn(16, 3, 640, 640)\n    #     m1 = lambda x: x * torch.sigmoid(x)\n    #     m2 = nn.SiLU()\n    #     profile(input, [m1, m2], n=100)  # profile over 100 iterations\n\n    results = []\n    if not isinstance(device, torch.device):\n        device = select_device(device)\n    print(\n        f\"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}\"\n        f\"{'input':>24s}{'output':>24s}\"\n        )\n\n    for x in input if isinstance(input, list) else [input]:\n        x = x.to(device)\n        x.requires_grad = True\n        for m in ops if isinstance(ops, list) else [ops]:\n            m = m.to(device) if hasattr(m, 'to') else m  # device\n            m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m\n            tf, tb, t = 0, 0, [0, 0, 0]  # dt forward, backward\n            try:\n                flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPs\n            except Exception:\n                flops = 0\n\n            try:\n                for _ in range(n):\n                    t[0] = time_sync()\n                    y = m(x)\n                    t[1] = time_sync()\n                    try:\n                        _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()\n                        t[2] = time_sync()\n                    except Exception:  # no backward method\n                        # print(e)  # for debug\n                        t[2] = float('nan')\n                    tf += (t[1] - t[0]) * 1000 / n  # ms per op forward\n                    tb += (t[2] - t[1]) * 1000 / n  # ms per op backward\n                mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0  # (GB)\n                s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y))  # shapes\n                p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0  # parameters\n                print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')\n                results.append([p, flops, mem, tf, tb, s_in, s_out])\n            except Exception as e:\n                print(e)\n                results.append(None)\n            torch.cuda.empty_cache()\n   
 return results\n\n\ndef is_parallel(model):\n    # Returns True if model is of type DP or DDP\n    return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)\n\n\ndef de_parallel(model):\n    # De-parallelize a model: returns single-GPU model if model is of type DP or DDP\n    return model.module if is_parallel(model) else model\n\n\ndef initialize_weights(model):\n    for m in model.modules():\n        t = type(m)\n        if t is nn.Conv2d:\n            pass  # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n        elif t is nn.BatchNorm2d:\n            m.eps = 1e-3\n            m.momentum = 0.03\n        elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n            m.inplace = True\n\n\ndef find_modules(model, mclass=nn.Conv2d):\n    # Finds layer indices matching module class 'mclass'\n    return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]\n\n\ndef sparsity(model):\n    # Return global model sparsity\n    a, b = 0, 0\n    for p in model.parameters():\n        a += p.numel()\n        b += (p == 0).sum()\n    return b / a\n\n\ndef prune(model, amount=0.3):\n    # Prune model to requested global sparsity\n    import torch.nn.utils.prune as prune\n    print('Pruning model... ', end='')\n    for name, m in model.named_modules():\n        if isinstance(m, nn.Conv2d):\n            prune.l1_unstructured(m, name='weight', amount=amount)  # prune\n            prune.remove(m, 'weight')  # make permanent\n    print(' %.3g global sparsity' % sparsity(model))\n\n\ndef fuse_conv_and_bn(conv, bn):\n    # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n    fusedconv = nn.Conv2d(\n        conv.in_channels,\n        conv.out_channels,\n        kernel_size=conv.kernel_size,\n        stride=conv.stride,\n        padding=conv.padding,\n        groups=conv.groups,\n        bias=True\n        ).requires_grad_(False).to(conv.weight.device)\n\n    # Prepare filters\n    w_conv = conv.weight.clone().view(conv.out_channels, -1)\n    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))\n\n    # Prepare spatial bias\n    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias\n    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n    return fusedconv\n\n\ndef model_info(model, verbose=False, img_size=640):\n    # Model information. img_size may be int or list, i.e. 
img_size=640 or img_size=[640, 320]\n    n_p = sum(x.numel() for x in model.parameters())  # number parameters\n    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients\n    if verbose:\n        print(f\"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}\")\n        for i, (name, p) in enumerate(model.named_parameters()):\n            name = name.replace('module_list.', '')\n            print(\n                '%5g %40s %9s %12g %20s %10.3g %10.3g' %\n                (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())\n                )\n\n    try:  # FLOPs\n        from thop import profile\n        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32\n        img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device)  # input\n        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPs\n        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]  # expand if int/float\n        fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride)  # 640x640 GFLOPs\n    except Exception:\n        fs = ''\n\n    name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model'\n    LOGGER.info(f\"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}\")\n\n\ndef scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)\n    # Scales img(bs,3,y,x) by ratio constrained to gs-multiple\n    if ratio == 1.0:\n        return img\n    h, w = img.shape[2:]\n    s = (int(h * ratio), int(w * ratio))  # new size\n    img = F.interpolate(img, size=s, mode='bilinear', align_corners=False)  # resize\n    if not same_shape:  # pad/crop img\n        h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))\n    return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447)  # value = imagenet mean\n\n\ndef copy_attr(a, b, include=(), exclude=()):\n    # Copy attributes from b to a, options to only include [...] and to exclude [...]\n    for k, v in b.__dict__.items():\n        if (len(include) and k not in include) or k.startswith('_') or k in exclude:\n            continue\n        else:\n            setattr(a, k, v)\n\n\ndef smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, weight_decay=1e-5):\n    # YOLOv5 3-param group optimizer: 0) weights with decay, 1) weights no decay, 2) biases no decay\n    g = [], [], []  # optimizer parameter groups\n    bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # normalization layers, i.e. 
BatchNorm2d()\n    for v in model.modules():\n        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias (no decay)\n            g[2].append(v.bias)\n        if isinstance(v, bn):  # weight (no decay)\n            g[1].append(v.weight)\n        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)\n            g[0].append(v.weight)\n\n    if name == 'Adam':\n        optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999))  # adjust beta1 to momentum\n    elif name == 'AdamW':\n        optimizer = torch.optim.AdamW(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0)\n    elif name == 'RMSProp':\n        optimizer = torch.optim.RMSprop(g[2], lr=lr, momentum=momentum)\n    elif name == 'SGD':\n        optimizer = torch.optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True)\n    else:\n        raise NotImplementedError(f'Optimizer {name} not implemented.')\n\n    optimizer.add_param_group({'params': g[0], 'weight_decay': weight_decay})  # add g0 with weight_decay\n    optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0})  # add g1 (BatchNorm2d weights)\n    LOGGER.info(\n        f\"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups \"\n        f\"{len(g[1])} weight (no decay), {len(g[0])} weight, {len(g[2])} bias\"\n        )\n    return optimizer\n\n\nclass EarlyStopping:\n    # YOLOv5 simple early stopper\n    def __init__(self, patience=30):\n        self.best_fitness = 0.0  # i.e. mAP\n        self.best_epoch = 0\n        self.patience = patience or float('inf')  # epochs to wait after fitness stops improving to stop\n        self.possible_stop = False  # possible stop may occur next epoch\n\n    def __call__(self, epoch, fitness):\n        if fitness >= self.best_fitness:  # >= 0 to allow for early zero-fitness stage of training\n            self.best_epoch = epoch\n            self.best_fitness = fitness\n        delta = epoch - self.best_epoch  # epochs without improvement\n        self.possible_stop = delta >= (self.patience - 1)  # possible stop may occur next epoch\n        stop = delta >= self.patience  # stop training if patience exceeded\n        if stop:\n            LOGGER.info(\n                f'Stopping training early as no improvement observed in last {self.patience} epochs. '\n                f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\\n'\n                f'To update EarlyStopping(patience={self.patience}) pass a new patience value, '\n                f'i.e. 
`python trainfd.py --patience 300` or use `--patience 0` to disable EarlyStopping.'\n                )\n        return stop\n\n\nclass ModelEMA:\n    \"\"\" Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models\n    Keeps a moving average of everything in the model state_dict (parameters and buffers)\n    For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\n    \"\"\"\n\n    def __init__(self, model, decay=0.9999, tau=2000, updates=0):\n        # Create EMA\n        self.ema = deepcopy(de_parallel(model)).eval()  # FP32 EMA\n        # if next(model.parameters()).device.type != 'cpu':\n        #     self.ema.half()  # FP16 EMA\n        self.updates = updates  # number of EMA updates\n        self.decay = lambda x: decay * (1 - math.exp(-x / tau))  # decay exponential ramp (to help early epochs)\n        for p in self.ema.parameters():\n            p.requires_grad_(False)\n\n    def update(self, model):\n        # Update EMA parameters\n        with torch.no_grad():\n            self.updates += 1\n            d = self.decay(self.updates)\n\n            msd = de_parallel(model).state_dict()  # model state_dict\n            for k, v in self.ema.state_dict().items():\n                if v.dtype.is_floating_point:\n                    v *= d\n                    v += (1 - d) * msd[k].detach()\n\n    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):\n        # Update EMA attributes\n        copy_attr(self.ema, model, include, exclude)\n"
  },
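  {
    "path": "examples/fuse_conv_bn_sketch.py",
    "content": "\"\"\"\nStandalone sketch of the Conv2d + BatchNorm2d fusion formula used by\ntorch_utils.fuse_conv_and_bn (illustrative file, not part of the original\nrelease; re-created here so it runs without the rest of the utils package).\nVerifies that the fused convolution matches conv -> bn in eval mode.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nconv = nn.Conv2d(3, 8, 3, padding=1, bias=False).eval()\nbn = nn.BatchNorm2d(8).eval()\nbn.running_mean.uniform_(-1, 1)  # give BN non-trivial running statistics\nbn.running_var.uniform_(0.5, 2.0)\n\nfused = nn.Conv2d(3, 8, 3, padding=1, bias=True).requires_grad_(False)\nwith torch.no_grad():\n    # W_fused = diag(gamma / sqrt(var + eps)) @ W_conv\n    w_conv = conv.weight.clone().view(8, -1)\n    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n    fused.weight.copy_(torch.mm(w_bn, w_conv).view(fused.weight.shape))\n    # b_fused = W_bn @ b_conv + (beta - gamma * mean / sqrt(var + eps))\n    b_conv = torch.zeros(8)  # conv has no bias\n    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n    fused.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\nx = torch.randn(2, 3, 16, 16)\nwith torch.no_grad():\n    print(torch.allclose(bn(conv(x)), fused(x), atol=1e-4))  # True\n"
  },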
  {
    "path": "module/fuse/__init__.py",
    "content": ""
  },
  {
    "path": "module/fuse/discriminator.py",
    "content": "from torch import nn, Tensor\n\n\nclass Discriminator(nn.Module):\n    \"\"\"\n    Use to discriminate fused images and source images.\n    \"\"\"\n\n    def __init__(self, dim: int = 32, size: tuple[int, int] = (224, 224)):\n        super(Discriminator, self).__init__()\n\n        self.conv = nn.Sequential(\n            nn.Sequential(\n                nn.Conv2d(1, dim, (3, 3), (2, 2), 1),\n                nn.LeakyReLU(0.2, True),\n            ),\n            nn.Sequential(\n                nn.Conv2d(dim, dim * 2, (3, 3), (2, 2), 1),\n                nn.LeakyReLU(0.2, True),\n            ),\n            nn.Sequential(\n                nn.Conv2d(dim * 2, dim * 4, (3, 3), (2, 2), 1),\n                nn.LeakyReLU(0.2, True),\n            ),\n        )\n\n        self.flatten = nn.Flatten()\n        self.linear = nn.Linear((size[0] // 8) * (size[1] // 8) * 4 * dim, 1)\n\n    def forward(self, x: Tensor) -> Tensor:\n        x = self.conv(x)\n        x = self.flatten(x)\n        x = self.linear(x)\n        return x\n"
  },
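  {
    "path": "examples/discriminator_sketch.py",
    "content": "\"\"\"\nUsage sketch for module/fuse/discriminator.py (illustrative file, not part of\nthe original release; the dotted import is an assumption that the repository\nroot is on PYTHONPATH). One unbounded realness logit is produced per image.\n\"\"\"\n\nimport torch\n\nfrom module.fuse.discriminator import Discriminator\n\nd = Discriminator(dim=32, size=(224, 224)).eval()\nx = torch.rand(4, 1, 224, 224)  # e.g. a batch of fused or source luminance maps in [0, 1]\nwith torch.no_grad():\n    logits = d(x)\nprint(logits.shape)  # torch.Size([4, 1]): 224 -> 112 -> 56 -> 28 via three stride-2 convs\n"
  },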
  {
    "path": "module/fuse/generator.py",
    "content": "import torch\nimport torch.nn as nn\nfrom torch import Tensor\n\n\nclass Generator(nn.Module):\n    r\"\"\"\n    Use to generate fused images.\n    ir + vi -> fus\n    \"\"\"\n\n    def __init__(self, dim: int = 32, depth: int = 3):\n        super(Generator, self).__init__()\n        self.depth = depth\n\n        self.encoder = nn.Sequential(\n            nn.Conv2d(2, dim, (3, 3), (1, 1), 1),\n            nn.BatchNorm2d(dim),\n            nn.ReLU()\n        )\n\n        self.dense = nn.ModuleList([\n            nn.Sequential(\n                nn.Conv2d(dim * (i + 1), dim, (3, 3), (1, 1), 1),\n                nn.BatchNorm2d(dim),\n                nn.ReLU()\n            ) for i in range(depth)\n        ])\n\n        self.fuse = nn.Sequential(\n            nn.Sequential(\n                nn.Conv2d(dim * (depth + 1), dim * 4, (3, 3), (1, 1), 1),\n                nn.ReLU()\n            ),\n            nn.Sequential(\n                nn.Conv2d(dim * 4, dim * 2, (3, 3), (1, 1), 1),\n                nn.BatchNorm2d(dim * 2),\n                nn.ReLU()\n            ),\n            nn.Sequential(\n                nn.Conv2d(dim * 2, dim, (3, 3), (1, 1), 1),\n                nn.BatchNorm2d(dim),\n                nn.ReLU()\n            ),\n            nn.Sequential(\n                nn.Conv2d(dim, 1, (3, 3), (1, 1), 1),\n                nn.Tanh()\n            ),\n        )\n\n    def forward(self, ir: Tensor, vi: Tensor) -> Tensor:\n        src = torch.cat([ir, vi], dim=1)\n        x = self.encoder(src)\n        for i in range(self.depth):\n            t = self.dense[i](x)\n            x = torch.cat([x, t], dim=1)\n        fus = self.fuse(x)\n        return fus\n"
  },
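  {
    "path": "examples/generator_sketch.py",
    "content": "\"\"\"\nUsage sketch for module/fuse/generator.py (illustrative file, not part of the\noriginal release; the dotted import is an assumption that the repository root\nis on PYTHONPATH). Fuses a dummy infrared/visible pair into one image.\n\"\"\"\n\nimport torch\n\nfrom module.fuse.generator import Generator\n\ng = Generator(dim=32, depth=3).eval()\nir = torch.rand(1, 1, 224, 224)  # infrared luminance in [0, 1]\nvi = torch.rand(1, 1, 224, 224)  # visible luminance in [0, 1]\nwith torch.no_grad():\n    fus = g(ir, vi)\nprint(fus.shape)  # torch.Size([1, 1, 224, 224]); the Tanh output lies in (-1, 1)\n"
  },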
  {
    "path": "module/saliency/__init__.py",
    "content": ""
  },
  {
    "path": "module/saliency/u2net.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n# U^2-Net: Going Deeper with Nested U-Structure for Salient Object Detection.\n# Author: Xuebin Qin, Zichen Zhang, Chenyang Huang et al.\n# Code Reference: https://github.com/xuebinqin/U-2-Net/blob/master/model/u2net.py\n\nclass REBNCONV(nn.Module):\n    def __init__(self, in_ch=3, out_ch=3, dirate=1):\n        super(REBNCONV, self).__init__()\n\n        self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dirate, dilation=1 * dirate)\n        self.bn_s1 = nn.BatchNorm2d(out_ch)\n        self.relu_s1 = nn.ReLU(inplace=True)\n\n    def forward(self, x):\n        hx = x\n        xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))\n\n        return xout\n\n\n## upsample tensor 'src' to have the same spatial size with tensor 'tar'\ndef _upsample_like(src, tar):\n    src = nn.functional.interpolate(src, size=tar.shape[2:], mode='bilinear')\n    return src\n\n\n### RSU-7 ###\nclass RSU7(nn.Module):  # UNet07DRES(nn.Module):\n\n    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n        super(RSU7, self).__init__()\n\n        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)\n\n        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)\n        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)\n        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)\n        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)\n        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)\n        self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1)\n\n        self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2)\n\n        self.rebnconv6d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n        self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n        self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)\n\n    def forward(self, x):\n        hx = x\n        hxin = self.rebnconvin(hx)\n\n        hx1 = self.rebnconv1(hxin)\n        hx = self.pool1(hx1)\n\n        hx2 = self.rebnconv2(hx)\n        hx = self.pool2(hx2)\n\n        hx3 = self.rebnconv3(hx)\n        hx = self.pool3(hx3)\n\n        hx4 = self.rebnconv4(hx)\n        hx = self.pool4(hx4)\n\n        hx5 = self.rebnconv5(hx)\n        hx = self.pool5(hx5)\n\n        hx6 = self.rebnconv6(hx)\n\n        hx7 = self.rebnconv7(hx6)\n\n        hx6d = self.rebnconv6d(torch.cat((hx7, hx6), 1))\n        hx6dup = _upsample_like(hx6d, hx5)\n\n        hx5d = self.rebnconv5d(torch.cat((hx6dup, hx5), 1))\n        hx5dup = _upsample_like(hx5d, hx4)\n\n        hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))\n        hx4dup = _upsample_like(hx4d, hx3)\n\n        hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))\n        hx3dup = _upsample_like(hx3d, hx2)\n\n        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))\n        hx2dup = _upsample_like(hx2d, hx1)\n\n        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))\n\n        return hx1d + hxin\n\n\n### RSU-6 ###\nclass RSU6(nn.Module):  # UNet06DRES(nn.Module):\n\n    def __init__(self, in_ch=3, 
mid_ch=12, out_ch=3):\n        super(RSU6, self).__init__()\n\n        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)\n\n        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)\n        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)\n        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)\n        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)\n        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)\n\n        self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=2)\n\n        self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n        self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)\n\n    def forward(self, x):\n        hx = x\n\n        hxin = self.rebnconvin(hx)\n\n        hx1 = self.rebnconv1(hxin)\n        hx = self.pool1(hx1)\n\n        hx2 = self.rebnconv2(hx)\n        hx = self.pool2(hx2)\n\n        hx3 = self.rebnconv3(hx)\n        hx = self.pool3(hx3)\n\n        hx4 = self.rebnconv4(hx)\n        hx = self.pool4(hx4)\n\n        hx5 = self.rebnconv5(hx)\n\n        hx6 = self.rebnconv6(hx5)\n\n        hx5d = self.rebnconv5d(torch.cat((hx6, hx5), 1))\n        hx5dup = _upsample_like(hx5d, hx4)\n\n        hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))\n        hx4dup = _upsample_like(hx4d, hx3)\n\n        hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))\n        hx3dup = _upsample_like(hx3d, hx2)\n\n        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))\n        hx2dup = _upsample_like(hx2d, hx1)\n\n        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))\n\n        return hx1d + hxin\n\n\n### RSU-5 ###\nclass RSU5(nn.Module):  # UNet05DRES(nn.Module):\n\n    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n        super(RSU5, self).__init__()\n\n        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)\n\n        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)\n        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)\n        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)\n        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)\n\n        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2)\n\n        self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)\n\n    def forward(self, x):\n        hx = x\n\n        hxin = self.rebnconvin(hx)\n\n        hx1 = self.rebnconv1(hxin)\n        hx = self.pool1(hx1)\n\n        hx2 = self.rebnconv2(hx)\n        hx = self.pool2(hx2)\n\n        hx3 = self.rebnconv3(hx)\n        hx = self.pool3(hx3)\n\n        hx4 = self.rebnconv4(hx)\n\n        hx5 = self.rebnconv5(hx4)\n\n        hx4d = self.rebnconv4d(torch.cat((hx5, hx4), 1))\n        hx4dup = _upsample_like(hx4d, hx3)\n\n        hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))\n  
      hx3dup = _upsample_like(hx3d, hx2)\n\n        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))\n        hx2dup = _upsample_like(hx2d, hx1)\n\n        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))\n\n        return hx1d + hxin\n\n\n### RSU-4 ###\nclass RSU4(nn.Module):  # UNet04DRES(nn.Module):\n\n    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n        super(RSU4, self).__init__()\n\n        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)\n\n        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)\n        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)\n        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)\n\n        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=2)\n\n        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)\n        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)\n\n    def forward(self, x):\n        hx = x\n\n        hxin = self.rebnconvin(hx)\n\n        hx1 = self.rebnconv1(hxin)\n        hx = self.pool1(hx1)\n\n        hx2 = self.rebnconv2(hx)\n        hx = self.pool2(hx2)\n\n        hx3 = self.rebnconv3(hx)\n\n        hx4 = self.rebnconv4(hx3)\n\n        hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1))\n        hx3dup = _upsample_like(hx3d, hx2)\n\n        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))\n        hx2dup = _upsample_like(hx2d, hx1)\n\n        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))\n\n        return hx1d + hxin\n\n\n### RSU-4F ###\nclass RSU4F(nn.Module):  # UNet04FRES(nn.Module):\n\n    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):\n        super(RSU4F, self).__init__()\n\n        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)\n\n        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)\n        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=2)\n        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=4)\n\n        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=8)\n\n        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=4)\n        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=2)\n        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)\n\n    def forward(self, x):\n        hx = x\n\n        hxin = self.rebnconvin(hx)\n\n        hx1 = self.rebnconv1(hxin)\n        hx2 = self.rebnconv2(hx1)\n        hx3 = self.rebnconv3(hx2)\n\n        hx4 = self.rebnconv4(hx3)\n\n        hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1))\n        hx2d = self.rebnconv2d(torch.cat((hx3d, hx2), 1))\n        hx1d = self.rebnconv1d(torch.cat((hx2d, hx1), 1))\n\n        return hx1d + hxin\n\n\n##### U^2-Net ####\nclass U2NET(nn.Module):\n\n    def __init__(self, in_ch=3, out_ch=1):\n        super(U2NET, self).__init__()\n\n        self.stage1 = RSU7(in_ch, 32, 64)\n        self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.stage2 = RSU6(64, 32, 128)\n        self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.stage3 = RSU5(128, 64, 256)\n        self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.stage4 = RSU4(256, 128, 512)\n        self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.stage5 = RSU4F(512, 256, 512)\n        self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.stage6 = RSU4F(512, 256, 512)\n\n        # decoder\n        self.stage5d = RSU4F(1024, 256, 512)\n   
     self.stage4d = RSU4(1024, 128, 256)\n        self.stage3d = RSU5(512, 64, 128)\n        self.stage2d = RSU6(256, 32, 64)\n        self.stage1d = RSU7(128, 16, 64)\n\n        self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)\n        self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)\n        self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)\n        self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)\n        self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)\n        self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)\n\n        self.outconv = nn.Conv2d(6 * out_ch, out_ch, 1)\n\n    def forward(self, x):\n        hx = x\n\n        # stage 1\n        hx1 = self.stage1(hx)\n        hx = self.pool12(hx1)\n\n        # stage 2\n        hx2 = self.stage2(hx)\n        hx = self.pool23(hx2)\n\n        # stage 3\n        hx3 = self.stage3(hx)\n        hx = self.pool34(hx3)\n\n        # stage 4\n        hx4 = self.stage4(hx)\n        hx = self.pool45(hx4)\n\n        # stage 5\n        hx5 = self.stage5(hx)\n        hx = self.pool56(hx5)\n\n        # stage 6\n        hx6 = self.stage6(hx)\n        hx6up = _upsample_like(hx6, hx5)\n\n        # -------------------- decoder --------------------\n        hx5d = self.stage5d(torch.cat((hx6up, hx5), 1))\n        hx5dup = _upsample_like(hx5d, hx4)\n\n        hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1))\n        hx4dup = _upsample_like(hx4d, hx3)\n\n        hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1))\n        hx3dup = _upsample_like(hx3d, hx2)\n\n        hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1))\n        hx2dup = _upsample_like(hx2d, hx1)\n\n        hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1))\n\n        # side output\n        d1 = self.side1(hx1d)\n\n        d2 = self.side2(hx2d)\n        d2 = _upsample_like(d2, d1)\n\n        d3 = self.side3(hx3d)\n        d3 = _upsample_like(d3, d1)\n\n        d4 = self.side4(hx4d)\n        d4 = _upsample_like(d4, d1)\n\n        d5 = self.side5(hx5d)\n        d5 = _upsample_like(d5, d1)\n\n        d6 = self.side6(hx6)\n        d6 = _upsample_like(d6, d1)\n\n        d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1))\n\n        return [torch.sigmoid(x) for x in [d0, d1, d2, d3, d4, d5, d6]]\n\n\n### U^2-Net small ###\nclass U2NETP(nn.Module):\n\n    def __init__(self, in_ch=3, out_ch=1):\n        super(U2NETP, self).__init__()\n\n        self.stage1 = RSU7(in_ch, 16, 64)\n        self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.stage2 = RSU6(64, 16, 64)\n        self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.stage3 = RSU5(64, 16, 64)\n        self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.stage4 = RSU4(64, 16, 64)\n        self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.stage5 = RSU4F(64, 16, 64)\n        self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n        self.stage6 = RSU4F(64, 16, 64)\n\n        # decoder\n        self.stage5d = RSU4F(128, 16, 64)\n        self.stage4d = RSU4(128, 16, 64)\n        self.stage3d = RSU5(128, 16, 64)\n        self.stage2d = RSU6(128, 16, 64)\n        self.stage1d = RSU7(128, 16, 64)\n\n        self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)\n        self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)\n        self.side3 = nn.Conv2d(64, out_ch, 3, padding=1)\n        self.side4 = nn.Conv2d(64, out_ch, 3, padding=1)\n        self.side5 = nn.Conv2d(64, out_ch, 3, padding=1)\n        self.side6 = nn.Conv2d(64, out_ch, 3, padding=1)\n\n        self.outconv = 
nn.Conv2d(6 * out_ch, out_ch, 1)\n\n    def forward(self, x):\n        hx = x\n\n        # stage 1\n        hx1 = self.stage1(hx)\n        hx = self.pool12(hx1)\n\n        # stage 2\n        hx2 = self.stage2(hx)\n        hx = self.pool23(hx2)\n\n        # stage 3\n        hx3 = self.stage3(hx)\n        hx = self.pool34(hx3)\n\n        # stage 4\n        hx4 = self.stage4(hx)\n        hx = self.pool45(hx4)\n\n        # stage 5\n        hx5 = self.stage5(hx)\n        hx = self.pool56(hx5)\n\n        # stage 6\n        hx6 = self.stage6(hx)\n        hx6up = _upsample_like(hx6, hx5)\n\n        # decoder\n        hx5d = self.stage5d(torch.cat((hx6up, hx5), 1))\n        hx5dup = _upsample_like(hx5d, hx4)\n\n        hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1))\n        hx4dup = _upsample_like(hx4d, hx3)\n\n        hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1))\n        hx3dup = _upsample_like(hx3d, hx2)\n\n        hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1))\n        hx2dup = _upsample_like(hx2d, hx1)\n\n        hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1))\n\n        # side output\n        d1 = self.side1(hx1d)\n\n        d2 = self.side2(hx2d)\n        d2 = _upsample_like(d2, d1)\n\n        d3 = self.side3(hx3d)\n        d3 = _upsample_like(d3, d1)\n\n        d4 = self.side4(hx4d)\n        d4 = _upsample_like(d4, d1)\n\n        d5 = self.side5(hx5d)\n        d5 = _upsample_like(d5, d1)\n\n        d6 = self.side6(hx6)\n        d6 = _upsample_like(d6, d1)\n\n        d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1))\n\n        return [torch.sigmoid(x) for x in [d0, d1, d2, d3, d4, d5, d6]]\n"
  },
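  {
    "path": "docs/examples/u2netp_forward.py",
    "content": "# Hypothetical sketch, not part of the original repository: shows that U2NETP\n# (the small U^2-Net used by the saliency pipeline) returns seven sigmoid maps,\n# the fused prediction d0 plus six side outputs, all at the input resolution.\n# The file path and the dummy input below are illustrative assumptions.\nimport torch\n\nfrom module.saliency.u2net import U2NETP\n\nnet = U2NETP(in_ch=1, out_ch=1)  # single-channel setup used for infrared masks\nnet.eval()\n\nx = torch.rand(1, 1, 320, 320)  # dummy infrared batch at the pipeline's working size\nwith torch.no_grad():\n    outs = net(x)\n\nprint(len(outs))  # 7 -> [d0, d1, d2, d3, d4, d5, d6]\nprint([tuple(o.shape) for o in outs])  # each (1, 1, 320, 320)\n"
  },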
  {
    "path": "pipeline/__init__.py",
    "content": ""
  },
  {
    "path": "pipeline/detect.py",
    "content": "import logging\nimport sys\nfrom pathlib import Path\nfrom typing import Literal, List, Tuple\n\nimport numpy\nimport torch\nimport torch.backends.cudnn\nfrom numpy import number\nfrom torch import Tensor, nn\nfrom torchvision.ops import box_convert\nfrom torchvision.utils import draw_bounding_boxes\n\nfrom functions.get_param_groups import get_param_groups\nfrom module.detect.models.common import Conv\nfrom module.detect.models.yolo import Model\nfrom module.detect.utils.general import labels_to_class_weights, non_max_suppression\nfrom module.detect.utils.loss import ComputeLoss\nfrom module.detect.utils.metrics import box_iou\n\n\nclass Detect:\n    r\"\"\"\n    Init detect pipeline to detect objects from fused images.\n    \"\"\"\n\n    def __init__(self, config, mode: Literal['train', 'inference'], nc: int, classes: List[str], labels: List[Tensor]):\n        # attach hyper parameters\n        self.config = config\n        self.mode = mode  # fuse computation mode: train(grad+graph), eval(graph), inference(x)\n\n        # init device\n        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n        logging.info(f'deploy {config.detect.model} on device {str(device)}')\n        self.device = device\n\n        # init yolo model\n        model_t = config.detect.model\n        config_p = Path(__file__).parents[1] / 'module' / 'detect' / 'models' / f'{model_t}.yaml'\n        net = Model(cfg=config_p, ch=config.detect.channels, nc=nc).to(self.device)\n        logging.info(f'init {model_t} with (nc: {nc})')\n        self.net = net\n\n        # init hyperparameters\n        hyp = config.loss.detect\n        nl = net.model[-1].nl  # number of detection layers\n\n        # model parameters\n        hyp['box'] *= 3 / nl  # scale to layers\n        hyp['cls'] *= nc / 80 * 3 / nl  # scale to classes and layers\n        hyp['obj'] *= (config.train.image_size[0] / 640) ** 2 * 3 / nl  # scale to image size and layers\n        hyp['label_smoothing'] = False  # label smoothing\n\n        # attach constants\n        net.nc = nc  # attach number of classes to model\n        net.hyp = hyp  # attach hyper parameters to model\n        net.class_weights = labels_to_class_weights(labels, nc).to(self.device)  # attach class weights\n        net.names = classes\n\n        # load pretrained parameters (optional)\n        d_ckpt = config.detect.pretrained\n        if d_ckpt is not None:\n            if 'http' in d_ckpt:\n                ckpt_p = Path.cwd() / 'weights' / 'v1' / 'tardal.pth'\n                url = d_ckpt\n                logging.info(f'download pretrained parameters from {url}')\n                try:\n                    ckpt = torch.hub.load_state_dict_from_url(url, model_dir=ckpt_p.parent, map_location='cpu')\n                except Exception as err:\n                    logging.fatal(f'connect to {url} failed: {err}, try download pretrained weights manually')\n                    sys.exit(1)\n            else:\n                ckpt = torch.load(d_ckpt, map_location='cpu')\n            self.load_ckpt(ckpt)\n\n        # criterion (reference: YOLOv5 official)\n        self.loss = ComputeLoss(net)\n\n    def load_ckpt(self, ckpt: dict):\n        ckpt = ckpt if 'detect' not in ckpt else ckpt['detect']\n        self.net.load_state_dict(ckpt)\n\n    def load_ckpt_fuse(self, ckpt: dict):\n        ckpt = ckpt if 'detect' not in ckpt else ckpt['detect']\n        # fuse conv & bn\n        self.net.fuse()\n        # compatibility updates\n        for m in 
self.net.modules():\n            t = type(m)\n            if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):\n                m.inplace = True  # torch 1.7.0 compatibility\n                if t is Detect and not isinstance(m.anchor_grid, list):\n                    delattr(m, 'anchor_grid')\n                    setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)\n            elif t is Conv:\n                m._non_persistent_buffers_set = set()  # torch 1.6.0 compatibility\n            elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):\n                m.recompute_scale_factor = None  # torch 1.11.0 compatibility\n        # return as expect\n        self.net.load_state_dict(ckpt)\n\n    def save_ckpt(self) -> dict:\n        ckpt = {'detect': self.net.state_dict()}\n        return ckpt\n\n    def forward(self, imgs: Tensor) -> Tensor:\n        self.net.train()\n        pred = self.net(imgs)\n        return pred\n\n    @torch.no_grad()\n    def eval(self, imgs: Tensor, targets: Tensor, stats: List, preview: bool = False) -> Tuple[int, Tensor | None]:\n        self.net.eval()\n\n        # forward\n        preds, _ = self.net(imgs)  # (xyxy, conf, cls) [h, w]\n\n        # convert pred format\n        batch_size, _, height, width = imgs.shape\n        targets[:, 2:] *= torch.tensor((width, height, width, height), device=self.device)  # (id, cls, xyxy) [1, 1] -> [h, w]\n        preds = non_max_suppression(preds, conf_thres=0.001, iou_thres=0.6, labels=[], multi_label=True)  # (xyxy, conf, cls) [h, w]\n\n        # const\n        iou_v = torch.linspace(0.5, 0.95, 10).to(self.device)  # iou vector for mAP@0.5:0.95\n        n_iou = iou_v.numel()\n\n        # record\n        seen = 0\n\n        # statistics per images\n        for si, pred in enumerate(preds):\n            labels = targets[targets[:, 0] == si, 1:]  # (cls, xyxy) [h, w]\n\n            num_l, num_p = labels.shape[0], pred.shape[0]\n            correct = torch.zeros(num_p, n_iou, dtype=torch.bool, device=self.device)\n            seen += 1\n\n            # no pred result\n            if num_p == 0:\n                if num_l:\n                    stats.append((correct, *torch.zeros((2, 0), device=self.device), labels[:, 0]))\n                continue\n\n            # predictions\n            pred_n = pred.clone()\n\n            # evaluate\n            if num_l:\n                t_box = labels[:, 1:5]  # (xyxy) [h, w]\n                labels_n = torch.cat((labels[:, 0:1], t_box), 1)  # (xyxy, cls) [h, w]\n                correct = self.process_batch(pred_n, labels_n, iou_v)\n\n            # update stats matrix\n            stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0]))  # (correct, conf, pcls, tcls)\n\n        # preview\n        if preview:\n            prv = self.preview(imgs, preds)\n            return seen, prv\n\n        # return as expected\n        return seen, None\n\n    @torch.inference_mode()\n    def inference(self, imgs: Tensor) -> Tensor:\n        self.net.eval()\n        # forward\n        preds, _ = self.net(imgs)\n        # convert pred format\n        batch_size, _, height, width = imgs.shape\n        preds = non_max_suppression(preds, conf_thres=0.001, iou_thres=0.6, multi_label=True)  # [xyxy, conf, cls]\n        # return as expected\n        return preds\n\n    def criterion(self, imgs: Tensor, targets: Tensor) -> Tuple[Tensor, List[number]]:\n        \"\"\"\n        criterion on detector\n        \"\"\"\n\n        logging.debug('criterion on yolo')\n\n       
 # forward\n        pred = self.forward(imgs)  # (bs, 3, 80, 80, class + 5)\n\n        # calculate loss\n        targets[:, 2:] = box_convert(targets[:, 2:], 'xyxy', 'cxcywh')  # (idx, cls, x1, y1, x2, y2) -> (idx, cls, cx, cy, w, h)\n        loss, loss_items = self.loss(pred, targets.to(self.device))\n\n        return loss, [x.item() for x in loss_items]\n\n    @staticmethod\n    def preview(imgs: Tensor, preds: Tensor, conf_th: float = 0.6):\n        imgs_mk = []\n        # preds: (xyxy, conf, cls)\n\n        # mark on images\n        for img, pred in zip(imgs, preds):\n            pred = list(filter(lambda x: x[4] > conf_th, pred))\n            logging.debug(f'detect {len(pred)} on images')\n            img = (img * 255).type(torch.uint8)\n            boxes = [x[:4] for x in pred]\n            cls = [int(x[5].cpu().numpy()) for x in pred]\n            labels = [f'{[cls]}: {x[4].cpu().numpy():.2f}' for cls, x in zip(cls, pred)]\n            if len(boxes):\n                img = draw_bounding_boxes(img, torch.stack(boxes, dim=0), labels=labels, width=2)\n            imgs_mk.append((img / 255).float().to(imgs.device))\n\n        # fill or crop to 9 images\n        if len(imgs_mk) > 9:\n            imgs_mk = imgs_mk[:9]\n        elif len(imgs_mk) < 9:\n            zeros = [torch.zeros_like(imgs_mk[0], device=imgs[0].device) for _ in range(9 - len(imgs_mk))]\n            imgs_mk = imgs_mk + zeros\n\n        # merge images(9, 3, h, w) to one image (3, 3h, 3w)\n        imgs_mk = torch.stack(imgs_mk, dim=0)\n        imgs_c = []\n        for i in range(3):\n            t = [imgs_mk[i * 3 + j] for j in range(3)]  # [(3, h, w), (3, h, w), (3, h, w)]\n            imgs_c.append(torch.cat(t, dim=2))  # (3, h, 3w)\n        imgs_one = torch.cat(imgs_c, dim=1)  # (3, 3h, 3w)\n\n        # return as expected\n        return imgs_one\n\n    def param_groups(self) -> tuple[List, List, List]:\n        group = [], [], []\n        tmp = get_param_groups(self.net)\n        for idx in range(3):\n            group[idx].extend(tmp[idx])\n        return group\n\n    @staticmethod\n    def process_batch(detections, labels, iou_v):\n        \"\"\"\n        Return correct predictions' matrix. Both sets of boxes are in (x1, y1, x2, y2) format.\n        Arguments:\n            detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n            labels (Array[M, 5]), class, x1, y1, x2, y2\n            iou_v (Array[10]), iou thresholds\n        Returns:\n            correct (Array[N, 10]), for 10 IoU levels\n        \"\"\"\n        correct = torch.zeros(detections.shape[0], iou_v.shape[0], dtype=torch.bool, device=iou_v.device)\n        iou = box_iou(labels[:, 1:], detections[:, :4])\n        x = torch.where((iou >= iou_v[0]) & (labels[:, 0:1] == detections[:, 5]))  # IoU above threshold and classes match\n        if x[0].shape[0]:\n            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detection, iou]\n            if x[0].shape[0] > 1:\n                matches = matches[matches[:, 2].argsort()[::-1]]\n                matches = matches[numpy.unique(matches[:, 1], return_index=True)[1]]\n                # matches = matches[matches[:, 2].argsort()[::-1]]\n                matches = matches[numpy.unique(matches[:, 0], return_index=True)[1]]\n            matches = torch.Tensor(matches).to(iou_v.device)\n            correct[matches[:, 1].long()] = matches[:, 2:3] >= iou_v\n        return correct\n"
  },
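  {
    "path": "docs/examples/process_batch_demo.py",
    "content": "# Hypothetical sketch, not part of the original repository: a self-contained\n# illustration of the matching rule in Detect.process_batch, re-implemented with\n# torchvision's box_iou instead of the vendored YOLOv5 helper. A prediction is\n# marked correct at every IoU threshold it clears, provided the class matches.\nimport torch\nfrom torchvision.ops import box_iou\n\niou_v = torch.linspace(0.5, 0.95, 10)  # thresholds for mAP@0.5:0.95\nlabels = torch.tensor([[0.0, 0.0, 0.0, 10.0, 10.0]])  # (cls, x1, y1, x2, y2)\ndets = torch.tensor([[1.0, 1.0, 10.0, 10.0, 0.9, 0.0]])  # (x1, y1, x2, y2, conf, cls)\n\niou = box_iou(labels[:, 1:], dets[:, :4])  # IoU between ground truth and predictions\nmask = (iou >= iou_v[0]) & (labels[:, 0:1] == dets[:, 5])  # IoU and class must both match\n\ncorrect = torch.zeros(dets.shape[0], iou_v.numel(), dtype=torch.bool)\nif mask.any():\n    correct[0] = iou[0, 0] >= iou_v  # IoU here is 0.81 -> True up to the 0.80 threshold\nprint(correct)\n"
  },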
  {
    "path": "pipeline/fuse.py",
    "content": "import logging\nimport sys\nfrom pathlib import Path\nfrom typing import Literal, List, Tuple, Optional\n\nimport torch\nimport torch.backends.cudnn\nfrom kornia.filters import spatial_gradient\nfrom kornia.losses import MS_SSIMLoss, ssim_loss\nfrom numpy import number\nfrom torch import Tensor\nfrom torch.nn.functional import l1_loss\n\nfrom functions.div_loss import div_loss\nfrom functions.get_param_groups import get_param_groups\nfrom module.fuse.discriminator import Discriminator\nfrom module.fuse.generator import Generator\n\n\nclass Fuse:\n    r\"\"\"\n    Init fuse pipeline to generate fused images from infrared and visible images.\n    \"\"\"\n\n    def __init__(self, config, mode: Literal['train', 'inference']):\n        # attach hyper parameters\n        self.config = config\n        self.mode = mode  # fuse computation mode: train(grad+graph), eval(graph), inference(x)\n        modules = []\n\n        # init device\n        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n        logging.info(f'deploy tardal-fuse on device {str(device)}')\n        self.device = device\n\n        # init tardal generator\n        f_dim, f_depth = config.fuse.dim, config.fuse.depth\n        generator = Generator(dim=f_dim, depth=f_depth)\n        modules.append(generator)\n        logging.info(f'init generator with (dim: {f_dim} depth: {f_depth})')\n        self.generator = generator\n\n        # init tardel discriminator during train mode\n        if mode == 'train':\n            f_size = config.train.image_size\n            dis_t = Discriminator(dim=f_dim, size=f_size)\n            dis_d = Discriminator(dim=f_dim, size=f_size)\n            modules += [dis_t, dis_d]\n            logging.info(f'init discriminators with (dim: {f_dim} size: {f_size})')\n            self.dis_t, self.dis_d = dis_t, dis_d\n\n        # load pretrained parameters (optional)\n        f_ckpt = config.fuse.pretrained\n        if f_ckpt is not None:\n            if 'http' in f_ckpt:\n                ckpt_p = Path.cwd() / 'weights' / 'v1' / 'tardal.pth'\n                url = f_ckpt\n                logging.info(f'download pretrained parameters from {url}')\n                try:\n                    ckpt = torch.hub.load_state_dict_from_url(url, model_dir=ckpt_p.parent, map_location='cpu')\n                except Exception as err:\n                    logging.fatal(f'connect to {url} failed: {err}, try download pretrained weights manually')\n                    sys.exit(1)\n            else:\n                ckpt = torch.load(f_ckpt, map_location='cpu')\n            self.load_ckpt(ckpt)\n\n        # criterion\n        if config.loss.fuse.src_fn == 'v1':\n            ms_ssim_loss = MS_SSIMLoss()\n            modules.append(ms_ssim_loss)\n            self.ms_ssim_loss = ms_ssim_loss\n\n        # move to device\n        _ = [x.to(device) for x in modules]\n\n        # more parameters\n        # WGAN div hyper parameters\n        self.wk, self.wp = 2, 6\n\n    def load_ckpt(self, ckpt: dict):\n        f_ckpt = ckpt if 'fuse' not in ckpt else ckpt['fuse']\n\n        # check eval mode\n        if self.config.inference.use_eval is None:\n            if 'use_eval' in f_ckpt:\n                logging.warning(f'overwriting inference.use_eval {self.config.inference.use_eval} with {f_ckpt[\"use_eval\"]}')\n                self.config.inference.use_eval = f_ckpt['use_eval']\n            else:\n                logging.warning(f'no use_eval settings found, using default (true)')\n                
self.config.inference.use_eval = True\n        if 'use_eval' in f_ckpt:\n            f_ckpt.pop('use_eval')\n\n        # load state dict\n        self.generator.load_state_dict(f_ckpt)\n        if self.mode == 'train' and 'disc' in ckpt:\n            self.dis_t.load_state_dict(ckpt['disc']['t'])\n            self.dis_d.load_state_dict(ckpt['disc']['d'])\n\n    def save_ckpt(self) -> dict:\n        ckpt = {'fuse': self.generator.state_dict()}\n        if self.mode == 'train':\n            ckpt |= {'disc': {'t': self.dis_t.state_dict(), 'd': self.dis_t.state_dict()}}\n        return ckpt\n\n    def forward(self, ir: Tensor, vi: Tensor) -> Tensor:\n        self.generator.train()\n        fus = self.generator(ir, vi)\n        return fus\n\n    @torch.no_grad()\n    def eval(self, ir: Tensor, vi: Tensor) -> Tensor:\n        self.generator.eval()\n        fus = self.generator(ir, vi)\n        return fus\n\n    @torch.inference_mode()\n    def inference(self, ir: Tensor, vi: Tensor) -> Tensor:\n        if self.config.inference.use_eval:\n            self.generator.eval()\n        fus = self.generator(ir, vi)\n        return fus\n\n    def criterion_dis_t(self, ir: Tensor, vi: Tensor, mk: Tensor) -> Tensor:\n        \"\"\"\n        criterion on target discriminator 'ir * m <- pixel distribution -> fus * m'\n        \"\"\"\n\n        logging.debug('criterion on target discriminator')\n\n        # switch to train mode\n        self.dis_t.train()\n\n        # sample real & fake\n        real_s = ir * mk\n        fake_s = self.eval(ir, vi) * mk\n        fake_s.detach_()\n\n        # judge value towards real & fake\n        real_v = torch.squeeze(self.dis_t(real_s))\n        fake_v = torch.squeeze(self.dis_t(fake_s))\n\n        # loss calculate\n        real_l, fake_l = -real_v.mean(), fake_v.mean()\n        div = div_loss(self.dis_t, real_s, fake_s, self.wp)\n        loss = real_l + fake_l + self.wk * div\n\n        return loss\n\n    def criterion_dis_d(self, ir: Tensor, vi: Tensor, mk: Tensor) -> Tensor:\n        \"\"\"\n        criterion on detail discriminator 'vi * m <- grad distribution -> fus * (1-m)'\n        mask: optional\n        \"\"\"\n\n        logging.debug('criterion on detail discriminator')\n\n        # switch to train mode\n        self.dis_d.train()\n\n        # sample real & fake\n        mk = mk if self.config.loss.fuse.d_mask else 0  # use mask or not\n        real_s = self.gradient(vi) * (1 - mk)\n        fake_s = self.gradient(self.eval(ir, vi)) * (1 - mk)\n        fake_s.detach_()\n\n        # judge value towards real & fake\n        real_v = torch.squeeze(self.dis_d(real_s))\n        fake_v = torch.squeeze(self.dis_d(fake_s))\n\n        # loss calculate\n        real_l, fake_l = -real_v.mean(), fake_v.mean()\n        div = div_loss(self.dis_d, real_s, fake_s, self.wp)\n        loss = real_l + fake_l + self.wk * div\n\n        return loss\n\n    def criterion_generator(self, ir: Tensor, vi: Tensor, mk: Tensor, w1: Tensor, w2: Tensor, d_warming: bool = True):\n        \"\"\"\n        criterion on generator 'ir, vi <- loss -> fus'\n        return: Tuple[Tensor, List[number]] (only fuse), Tuple[Tensor, Tensor, List[number]] (joint mode)\n        \"\"\"\n\n        logging.debug('criterion on generator')\n\n        # forward (train mode for calculate loss)\n        fus = self.forward(ir, vi)\n\n        # calculate src and adv loss\n        f_loss = self.config.loss.fuse\n        src_w, adv_w = f_loss.src, f_loss.adv\n        adv_w = 0 if d_warming else adv_w\n        src_l = w1 * 
self.src_loss(fus, ir) + w2 * self.src_loss(fus, vi)\n        adv_l, tar_l, det_l = self.adv_loss(fus, mk)\n        loss = src_w * src_l.mean() + adv_w * adv_l.mean()\n\n        # only fuse\n        return loss, [src_l.mean().item(), adv_l.mean().item(), tar_l, det_l]\n\n    @staticmethod\n    def gradient(x: Tensor, eps: float = 1e-8) -> Tensor:\n        s = spatial_gradient(x, 'sobel')\n        dx, dy = s[:, :, 0, :, :], s[:, :, 1, :, :]\n        u = torch.sqrt(torch.pow(dx, 2) + torch.pow(dy, 2) + eps)  # sqrt backwork x range: (0, n]\n        return u\n\n    def src_loss(self, x: Tensor, y: Tensor) -> Tensor:\n        src_fn = self.config.loss.fuse.src_fn\n        match src_fn:\n            case 'v0':\n                \"fus <- 0.01*ssim + 0.99*l1 -> src\"\n                return 0.01 * ssim_loss(x, y, window_size=11) + 0.99 * l1_loss(x, y)\n            case 'v1':\n                \"fus <- ms-ssim -> src\"\n                return self.ms_ssim_loss(x, y)\n            case _:\n                assert NotImplemented, f'unsupported src function: {src_fn}'\n\n    def adv_loss(self, fus: Tensor, mk: Tensor) -> Tuple[Tensor, number, number]:\n        # weights\n        f_loss = self.config.loss.fuse\n        tar_w, det_w = f_loss.t_adv, f_loss.d_adv\n        # target loss\n        self.dis_t.eval()\n        tar_l = -self.dis_t(fus * mk)  # fus * m -> target pixel distribution (max -> -min)\n        # detail loss\n        self.dis_d.eval()\n        mk = mk if self.config.loss.fuse.d_mask else 0  # use mask or not\n        det_l = -self.dis_d(self.gradient(fus) * (1 - mk))  # grad(fus) * (1-m) -> grad distribution (max -> -min)\n        return tar_w * tar_l + det_w * det_l, tar_l.mean().item(), det_l.mean().item()\n\n    def param_groups(self, key: Optional[Literal['g', 'd']] = None) -> tuple[List, List, List]:\n        match key:\n            case 'g':\n                return self.g_params()\n            case 'd':\n                return self.d_params()\n            case _:\n                g_params, d_params = self.g_params(), self.d_params()\n                group = [], [], []\n                for idx in range(3):\n                    group[idx].extend(g_params[idx])\n                    group[idx].extend(d_params[idx])\n                return group\n\n    def g_params(self) -> tuple[List, List, List]:\n        return get_param_groups(self.generator)\n\n    def d_params(self) -> tuple[List, List, List]:\n        group = [], [], []\n        for module in [self.dis_t, self.dis_d]:\n            tmp = get_param_groups(module)\n            for idx in range(3):\n                group[idx].extend(tmp[idx])\n        return group\n"
  },
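  {
    "path": "docs/examples/div_loss_sketch.py",
    "content": "# Hypothetical sketch, not part of the original repository: functions.div_loss is\n# not included in this dump, so this is only one common form of the Wasserstein\n# divergence gradient penalty (WGAN-div) that is consistent with how Fuse calls\n# it, i.e. div_loss(discriminator, real_s, fake_s, p) scaled by wk in the caller.\nimport torch\nfrom torch import Tensor, nn\n\n\ndef div_loss_sketch(dis: nn.Module, real: Tensor, fake: Tensor, p: int = 6) -> Tensor:\n    # sample points between the real and fake distributions\n    alpha = torch.rand(real.size(0), 1, 1, 1, device=real.device)\n    x = (alpha * real + (1 - alpha) * fake).requires_grad_(True)\n    d = dis(x)\n    # gradient of the critic output with respect to the interpolated input\n    grad = torch.autograd.grad(d, x, grad_outputs=torch.ones_like(d), create_graph=True)[0]\n    # E[||grad||_2^p], the divergence penalty term\n    return grad.flatten(1).norm(2, dim=1).pow(p).mean()\n"
  },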
  {
    "path": "pipeline/iqa.py",
    "content": "import logging\nimport socket\nimport sys\nfrom pathlib import Path\nfrom typing import Literal\n\nimport cv2\nimport torch.cuda\nfrom kornia import image_to_tensor, tensor_to_image\nfrom torch import Tensor\nfrom torchvision.models import vgg16\nfrom torchvision.transforms import Compose, Resize, Normalize\nfrom tqdm import tqdm\n\n\nclass IQA:\n    r\"\"\"\n    Init information measurement pipeline to generate iqa from source images.\n    \"\"\"\n\n    def __init__(self, url: str):\n        # init device\n        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n        logging.info(f'deploy iqa on device {str(device)}')\n        self.device = device\n\n        # init vgg backbone\n        extractor = vgg16().features\n        logging.info(f'init iqa extractor with (3 -> 1)')\n        self.extractor = extractor\n\n        # download pretrained parameters\n        ckpt_p = Path.cwd() / 'weights' / 'v1' / 'iqa.pth'\n        logging.info(f'download pretrained iqa weights from {url}')\n        socket.setdefaulttimeout(5)\n        try:\n            logging.info(f'starting download of pretrained weights from {url}')\n            ckpt = torch.hub.load_state_dict_from_url(url, model_dir=ckpt_p.parent, map_location='cpu')\n        except Exception as err:\n            logging.fatal(f'load {url} failed: {err}, try download pretrained weights manually')\n            sys.exit(1)\n        extractor.load_state_dict(ckpt)\n        logging.info(f'load pretrained iqa weights from {str(ckpt_p)}')\n\n        # move to device\n        extractor.to(device)\n\n        # more parameters\n        self.transform_fn = Compose([Resize((672, 672)), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n        self.upsample = Resize((672, 672))\n\n    @torch.inference_mode()\n    def inference(self, src: str | Path, dst: str | Path):\n        self.modality_inference(src, dst, 'ir')\n        self.modality_inference(src, dst, 'vi')\n\n    @torch.inference_mode()\n    def modality_inference(self, src: str | Path, dst: str | Path, modality: Literal['ir', 'vi']):\n        # create save folder\n        dst = Path(dst / modality)\n        dst.mkdir(parents=True, exist_ok=True)\n        logging.debug(f'create save folder {str(dst)}')\n\n        # forward\n        self.extractor.eval()\n        img_list = sorted(Path(src / modality).rglob('*.png'))\n        logging.info(f'load {len(img_list)} images from {str(src)}')\n        process = tqdm(img_list)\n        for img_p in process:\n            process.set_description(f'generate iqa for {img_p.name} to {str(dst)}')\n            img = self._imread(img_p).to(self.device)\n            reverse_fn = Resize(size=img.shape[-2:])\n            iqa = self.extractor_inference(img.unsqueeze(0))[0]\n            iqa = reverse_fn(iqa).squeeze()\n            cv2.imwrite(str(dst / img_p.name), tensor_to_image(iqa) * 255)\n\n    @torch.inference_mode()\n    def extractor_inference(self, x: Tensor) -> Tensor:\n        # information measurement\n        l_ids = [3, 8, 15, 22, 29]  # layers before max-pooling\n        f = []\n        x = x.repeat(1, 3, 1, 1) if x.size(dim=1) == 1 else x\n        x = self.transform_fn(x)\n        for index, layer in enumerate(self.extractor):\n            x = layer(x)\n            if index in l_ids:\n                t = x.mean(axis=1, keepdims=True)\n                f.append(self.upsample(t))\n        f = torch.cat(f, dim=1).mean(axis=1, keepdims=True)\n        return f\n\n    @staticmethod\n    def _imread(img_p: str | 
Path):\n        img = cv2.imread(str(img_p), cv2.IMREAD_GRAYSCALE)\n        img = image_to_tensor(img).float() / 255\n        return img\n"
  },
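  {
    "path": "docs/examples/iqa_usage.py",
    "content": "# Hypothetical usage sketch, not part of the original repository: IQA expects a\n# source folder containing 'ir' and 'vi' subfolders of .png images and writes one\n# grayscale information map per image under dst/ir and dst/vi. The URL and the\n# folder names below are placeholders, not values from the project config.\nfrom pipeline.iqa import IQA\n\niqa = IQA(url='https://example.com/iqa.pth')  # placeholder weight URL\niqa.inference(src='data/m3fd', dst='data/m3fd/iqa')  # reads data/m3fd/{ir,vi}/*.png\n"
  },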
  {
    "path": "pipeline/saliency.py",
    "content": "import logging\nimport socket\nimport sys\nimport warnings\nfrom pathlib import Path\n\nimport cv2\nimport torch.hub\nfrom kornia import image_to_tensor, tensor_to_image\nfrom torchvision.transforms import Resize, Compose, Normalize\nfrom tqdm import tqdm\n\nfrom module.saliency.u2net import U2NETP\n\n\nclass Saliency:\n    r\"\"\"\n    Init saliency detection pipeline to generate mask from infrared images.\n    \"\"\"\n\n    def __init__(self, url: str):\n        # init device\n        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n        logging.info(f'deploy u2net on device {str(device)}')\n        self.device = device\n\n        # init u2net small (u2netp)\n        net = U2NETP(in_ch=1, out_ch=1)\n        logging.info(f'init u2net small model with (1 -> 1)')\n        self.net = net\n\n        # download pretrained parameters\n        ckpt_p = Path.cwd() / 'weights' / 'v1' / 'u2netp.pth'\n        logging.info(f'download pretrained u2net weights from {url}')\n        socket.setdefaulttimeout(5)\n        try:\n            logging.info(f'starting download of pretrained weights from {url}')\n            ckpt = torch.hub.load_state_dict_from_url(url, model_dir=ckpt_p.parent, map_location='cpu')\n        except Exception as err:\n            logging.fatal(f'load {url} failed: {err}, try download pretrained weights manually')\n            sys.exit(1)\n        net.load_state_dict(ckpt)\n        logging.info(f'load pretrained u2net weights from {str(ckpt_p)}')\n\n        # move to device\n        net.to(device)\n\n        # more parameters\n        self.transform_fn = Compose([Resize(size=(320, 320)), Normalize(mean=0.485, std=0.229)])\n\n    @torch.inference_mode()\n    def inference(self, src: str | Path, dst: str | Path):\n        # create save folder\n        dst = Path(dst)\n        dst.mkdir(parents=True, exist_ok=True)\n        logging.debug(f'create save folder {str(dst)}')\n\n        # forward\n        self.net.eval()\n        warnings.filterwarnings(action='ignore', lineno=780)\n        img_list = sorted(Path(src).rglob('*.png'))\n        logging.info(f'load {len(img_list)} images from {str(src)}')\n        process = tqdm(img_list)\n        for img_p in process:\n            process.set_description(f'generate mask for {img_p.name} to {str(dst)}')\n            img = self._imread(img_p).to(self.device)\n            reverse_fn = Resize(size=img.shape[-2:])\n            img = self.transform_fn(img)\n            mask = self.net(img.unsqueeze(0))[0]\n            mask = (mask - mask.min()) / (mask.max() - mask.min())\n            mask = reverse_fn(mask).squeeze()\n            cv2.imwrite(str(dst / img_p.name), tensor_to_image(mask) * 255)\n\n    @staticmethod\n    def _imread(img_p: str | Path):\n        img = cv2.imread(str(img_p), cv2.IMREAD_GRAYSCALE)\n        img = image_to_tensor(img).float() / 255\n        return img\n"
  },
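  {
    "path": "docs/examples/saliency_usage.py",
    "content": "# Hypothetical usage sketch, not part of the original repository: Saliency reads\n# infrared .png images from src, runs U2NETP at 320x320, min-max normalises the\n# fused output, and writes one mask per image into dst at the original image\n# resolution. The URL and folder names below are placeholders.\nfrom pipeline.saliency import Saliency\n\nsaliency = Saliency(url='https://example.com/u2netp.pth')  # placeholder weight URL\nsaliency.inference(src='data/m3fd/ir', dst='data/m3fd/mask')\n"
  },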
  {
    "path": "pipeline/train.py",
    "content": "import logging\nfrom functools import reduce\nfrom pathlib import Path\n\nimport torch\nimport wandb\nfrom kornia.filters import SpatialGradient\nfrom kornia.losses import SSIMLoss\nfrom kornia.metrics import AverageMeter\nfrom torch import nn, Tensor\nfrom torch.optim import RMSprop\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom tqdm import tqdm\n\nfrom functions.div_loss import div_loss\nfrom modules.discriminator import Discriminator\nfrom modules.generator import Generator\nfrom utils.environment_probe import EnvironmentProbe\nfrom utils.fusion_data import FusionData\n\n\nclass Train:\n    \"\"\"\n    The train process for TarDAL.\n    \"\"\"\n\n    def __init__(self, environment_probe: EnvironmentProbe, config: dict):\n        logging.info(f'TarDAL Training | mask: {config.mask} | weight: {config.weight} | adv: {config.adv_weight}')\n        self.config = config\n        self.environment_probe = environment_probe\n\n        # modules\n        logging.info(f'generator | dim: {config.dim} | depth: {config.depth}')\n        self.generator = Generator(config.dim, config.depth)\n        logging.info(f'discriminator | dim: {config.dim} | size: {config.size}')\n        self.dis_target = Discriminator(config.dim, (config.size, config.size))\n        self.dis_detail = Discriminator(config.dim, (config.size, config.size))\n\n        # WGAN adam optim\n        logging.info(f'RMSprop | learning rate: {config.learning_rate}')\n        self.opt_generator = RMSprop(self.generator.parameters(), lr=config.learning_rate)\n        self.opt_dis_target = RMSprop(self.dis_target.parameters(), lr=config.learning_rate)\n        self.opt_dis_detail = RMSprop(self.dis_detail.parameters(), lr=config.learning_rate)\n\n        # move to device\n        logging.info(f'module device: {environment_probe.device}')\n        self.generator.to(environment_probe.device)\n        self.dis_target.to(environment_probe.device)\n        self.dis_detail.to(environment_probe.device)\n\n        # loss\n        self.l1 = nn.L1Loss(reduction='none')\n        self.ssim = SSIMLoss(window_size=11, reduction='none')\n        self.l1.cuda()\n        self.ssim.cuda()\n\n        # functions\n        self.spatial = SpatialGradient('diff')\n\n        # WGAN div hyper parameters\n        self.wk, self.wp = 2, 6\n\n        # datasets\n        folder = Path(config.folder)\n        resize = transforms.Resize((config.size, config.size))\n        dataset = FusionData(folder, config.mask, 'train', transforms=resize)\n        self.dataloader = DataLoader(dataset, config.batch_size, True, num_workers=config.num_workers, pin_memory=True)\n        logging.info(f'dataset | folder: {str(folder)} | size: {len(self.dataloader) * config.batch_size}')\n\n    def train_dis_target(self, ir: Tensor, vi: Tensor, mk: Tensor) -> Tensor:\n        \"\"\"\n        Train target discriminator for 'ir * m <- pixel -> fus * m'\n        \"\"\"\n\n        logging.debug('train target discriminator')\n        # switch to train mode\n        self.dis_target.train()\n\n        # sample real & fake\n        real_s = ir * mk\n        self.generator.eval()\n        fake_s = self.generator(ir, vi).detach() * mk\n\n        # judge value towards real & fake\n        real_v = torch.squeeze(self.dis_target(real_s))\n        fake_v = torch.squeeze(self.dis_target(fake_s))\n\n        # loss calculate\n        real_l, fake_l = -real_v.mean(), fake_v.mean()\n        div = div_loss(self.dis_target, real_s, fake_s, self.wp)\n        loss 
= real_l + fake_l + self.wk * div\n\n        # backward\n        self.opt_dis_target.zero_grad()\n        loss.backward()\n        self.opt_dis_target.step()\n\n        return loss.item()\n\n    def train_dis_detail(self, ir: Tensor, vi: Tensor, mk: Tensor) -> Tensor:\n        \"\"\"\n        Train detail discriminator for 'vi * (1-m) <- Grad -> fus * (1-m)'\n        \"\"\"\n\n        logging.debug('train detail discriminator')\n        # switch to train mode\n        self.dis_detail.train()\n\n        # sample real & fake\n        real_s = self.gradient(vi * (1 - mk))\n        self.generator.eval()\n        fake_s = self.gradient(self.generator(ir, vi).detach() * (1 - mk))\n\n        # judge value towards real & fake\n        real_v = torch.squeeze(self.dis_detail(real_s))\n        fake_v = torch.squeeze(self.dis_detail(fake_s))\n\n        # loss calculate\n        real_l, fake_l = -real_v.mean(), fake_v.mean()\n        div = div_loss(self.dis_detail, real_s, fake_s, self.wp)\n        loss = real_l + fake_l + self.wk * div\n\n        # backward\n        self.opt_dis_detail.zero_grad()\n        loss.backward()\n        self.opt_dis_detail.step()\n\n        return loss.item()\n\n    def gradient(self, x: Tensor, eps: float = 1e-6) -> Tensor:\n        s = self.spatial(x)\n        dx, dy = s[:, :, 0, :, :], s[:, :, 1, :, :]\n        u = torch.sqrt(torch.pow(dx, 2) + torch.pow(dy, 2) + eps)\n        return u\n\n    def train_generator(self, ir: Tensor, vi: Tensor, mk: Tensor, s1: Tensor, s2: Tensor) -> dict:\n        \"\"\"\n        Train generator 'ir + vi -> fus'\n        \"\"\"\n\n        logging.debug('train generator')\n        self.generator.train()\n        fus = self.generator(ir, vi)\n\n        # calculate loss towards criterion\n        b1, b2, b3 = self.config.weight  # b1 * ssim + b2 * l1 + b3 * adv\n\n        l_ir = b1 * self.ssim(fus, ir) + b2 * self.l1(fus, ir)\n        l_vi = b1 * self.ssim(fus, vi) + b2 * self.l1(fus, vi)\n\n        w1, w2 = 0.5 + 0.5 * (s1 - s2), 0.5 + 0.5 * (s2 - s1)  # data driven loss weights\n        l_src = w1 * l_ir + w2 * l_vi  # fus <- ssim + l1 -> (ir, vi)\n        l_src = l_src.mean()\n\n        self.dis_target.eval()\n        l_target = -self.dis_target(fus * mk).mean()  # judge target: fus * m\n        self.dis_detail.eval()\n        l_detail = -self.dis_detail(self.gradient(fus * (1 - mk))).mean()  # judge detail: Grad(fus * (1-mk))\n\n        c1, c2 = self.config.adv_weight  # c1 * l_target + c2 * l_detail\n        l_adv = c1 * l_target + c2 * l_detail\n\n        loss = l_src + b3 * l_adv\n\n        # backward\n        self.opt_generator.zero_grad()\n        loss.backward()\n        self.opt_generator.step()\n\n        # loss state\n        state = {\n            'g_loss': loss.item(),\n            'g_src_ir': l_ir.mean().item(),\n            'g_src_vi': l_vi.mean().item(),\n            'g_adv_target': l_target.item(),\n            'g_adv_detail': l_detail.item(),\n        }\n\n        return state\n\n    def run(self):\n        for epoch in range(1, self.config.epochs + 1):\n            process = tqdm(enumerate(self.dataloader), disable=not self.config.debug)\n            meter = AverageMeter()\n            for idx, sample in process:\n                ir, vi, mk = sample['ir'], sample['vi'], sample['mk']\n                s1, s2 = sample['vsm']['ir'], sample['vsm']['vi']\n                im = torch.cat([ir, vi, mk, s1, s2], dim=1)\n                im = im.to(self.environment_probe.device)\n                ir, vi, mk, s1, s2 = torch.chunk(im, 5, 
dim=1)\n\n                g_loss = self.train_generator(ir, vi, mk, s1, s2)\n                d_target_loss = self.train_dis_target(ir, vi, mk)\n                d_detail_loss = self.train_dis_detail(ir, vi, mk)\n\n                process.set_description(f'g: {g_loss[\"g_loss\"]:03f} | d: {d_target_loss:03f}, {d_detail_loss:03f}')\n                meter.update(Tensor(list(g_loss.values()) + [d_target_loss] + [d_detail_loss]))\n\n            keys = ['g_loss', 'g_src_ir', 'g_src_vi', 'g_adv_t', 'g_adv_d', 'd_t', 'd_d']\n            state = reduce(lambda x, y: x | y, [{k: v} for k, v in zip(keys, meter.avg)])\n            print(state)\n            wandb.log(state)\n            if epoch % 5 == 0:\n                self.save(epoch)\n\n    def save(self, epoch: int):\n        path = Path(self.config.cache) / self.config.id\n        path.mkdir(parents=True, exist_ok=True)\n        cache = path / f'{epoch:03d}.pth'\n        logging.info(f'save checkpoint to {str(cache)}')\n        state = {\n            'g': self.generator.state_dict(),\n            'd': {\n                't': self.dis_target.state_dict(),\n                'd': self.dis_target.state_dict(),\n            },\n            'opt': {\n                'g': self.opt_generator.state_dict(),\n                't': self.opt_dis_target.state_dict(),\n                'd': self.opt_dis_detail.state_dict(),\n            },\n        }\n        torch.save(state, cache)\n"
  },
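  {
    "path": "docs/examples/source_weights_demo.py",
    "content": "# Hypothetical sketch, not part of the original repository: the data-driven\n# source weights used in Train.train_generator always sum to one, since\n# w1 + w2 = (0.5 + 0.5*(s1 - s2)) + (0.5 + 0.5*(s2 - s1)) = 1, so the scores\n# s1, s2 only shift weight between the infrared and visible terms.\nimport torch\n\ns1 = torch.tensor([0.7])  # e.g. information score of the infrared image\ns2 = torch.tensor([0.4])  # e.g. information score of the visible image\nw1, w2 = 0.5 + 0.5 * (s1 - s2), 0.5 + 0.5 * (s2 - s1)\nprint(w1.item(), w2.item(), (w1 + w2).item())  # ~0.65 ~0.35 1.0\n"
  },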
  {
    "path": "requirements.txt",
    "content": "# TarDAL requirements\n# Usage: pip install -r requirements.txt\n\n# Base ----------------------------------------\nnumpy>=1.20\ntorch>=1.9\ntorchvision>=0.10\nkornia>=0.6.8\nopencv-python>=4.5.5.64\nPyYAML>=6.0\npandas>=1.5.1\nmatplotlib>=3.6.3\n\n# Logging -------------------------------------\nwandb>=0.12.11\ntqdm>=4.63.0\ntabulate>=0.8.9\n"
  },
  {
    "path": "scripts/__init__.py",
    "content": "from scripts.infer_f import InferF\nfrom scripts.infer_fd import InferFD\nfrom scripts.train_f import TrainF\nfrom scripts.train_fd import TrainFD\n\n__all__ = ['TrainF', 'TrainFD', 'InferF', 'InferFD']\n"
  },
  {
    "path": "scripts/infer_f.py",
    "content": "import logging\nfrom pathlib import Path\n\nimport torch\nimport yaml\nfrom kornia.color import ycbcr_to_rgb\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nimport loader\nfrom config import ConfigDict, from_dict\nfrom pipeline.fuse import Fuse\nfrom tools.dict_to_device import dict_to_device\n\n\nclass InferF:\n    def __init__(self, config: str | Path | ConfigDict, save_dir: str | Path):\n        # init logger\n        log_f = '%(asctime)s | %(filename)s[line:%(lineno)d] | %(levelname)s | %(message)s'\n        logging.basicConfig(level='INFO', format=log_f)\n        logging.info(f'TarDAL-v1 Inference Script')\n\n        # init config\n        if isinstance(config, str) or isinstance(config, Path):\n            config = yaml.safe_load(Path(config).open('r'))\n            config = from_dict(config)  # convert dict to object\n        else:\n            config = config\n        self.config = config\n\n        # debug mode\n        if config.debug.fast_run:\n            logging.warning('fast run mode is on, only for debug!')\n\n        # create save(output) folder\n        save_dir = Path(save_dir)\n        save_dir.mkdir(parents=True, exist_ok=True)\n        logging.info(f'create save folder {str(save_dir)}')\n        self.save_dir = save_dir\n\n        # init dataset & dataloader\n        data_t = getattr(loader, config.dataset.name)  # dataset type\n        self.data_t = data_t\n        p_dataset = data_t(root=config.dataset.root, mode='pred', config=config)\n        self.p_loader = DataLoader(\n            p_dataset, batch_size=config.inference.batch_size, shuffle=False,\n            collate_fn=data_t.collate_fn, pin_memory=True, num_workers=config.inference.num_workers,\n        )\n\n        # init pipeline\n        fuse = Fuse(config, mode='inference')\n        self.fuse = fuse\n\n    @torch.inference_mode()\n    def run(self):\n        p_l = tqdm(self.p_loader, total=len(self.p_loader), ncols=120)\n        for sample in p_l:\n            sample = dict_to_device(sample, self.fuse.device)\n            # f_net forward\n            fus = self.fuse.inference(ir=sample['ir'], vi=sample['vi'])\n            # recolor\n            if self.data_t.color and self.config.inference.grayscale is False:\n                fus = torch.cat([fus, sample['cbcr']], dim=1)\n                fus = ycbcr_to_rgb(fus)\n            # save images\n            self.data_t.pred_save(\n                fus, [self.save_dir / name for name in sample['name']],\n                shape=sample['shape']\n            )\n"
  },
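  {
    "path": "docs/examples/run_infer_f.py",
    "content": "# Hypothetical usage sketch, not part of the original repository: runs\n# fusion-only inference from a YAML config. The config path mirrors the default\n# used by the training scripts; the output folder is a placeholder.\nfrom scripts import InferF\n\ninfer = InferF(config='config/default.yaml', save_dir='runs/fuse')\ninfer.run()\n"
  },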
  {
    "path": "scripts/infer_fd.py",
    "content": "import logging\nfrom pathlib import Path\n\nimport torch\nimport yaml\nfrom kornia.color import ycbcr_to_rgb\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nimport loader\nfrom config import ConfigDict, from_dict\nfrom pipeline.detect import Detect\nfrom pipeline.fuse import Fuse\nfrom tools.dict_to_device import dict_to_device\n\n\nclass InferFD:\n    def __init__(self, config: str | Path | ConfigDict, save_dir: str | Path):\n        # init logger\n        log_f = '%(asctime)s | %(filename)s[line:%(lineno)d] | %(levelname)s | %(message)s'\n        logging.basicConfig(level='INFO', format=log_f)\n        logging.info(f'TarDAL-v1 Inference Script')\n\n        # init config\n        if isinstance(config, str) or isinstance(config, Path):\n            config = yaml.safe_load(Path(config).open('r'))\n            config = from_dict(config)  # convert dict to object\n        else:\n            config = config\n        self.config = config\n\n        # debug mode\n        if config.debug.fast_run:\n            logging.warning('fast run mode is on, only for debug!')\n\n        # save label as txt warning\n        if config.inference.save_txt:\n            logging.warning('labels will be saved as txt, this will slow down the inference speed!')\n\n        # create save(output) folder\n        save_dir = Path(save_dir)\n        save_dir.mkdir(parents=True, exist_ok=True)\n        (save_dir / 'images').mkdir(exist_ok=True)\n        (save_dir / 'labels').mkdir(exist_ok=True)\n        logging.info(f'create save folder {str(save_dir)}')\n        self.save_dir = save_dir\n\n        # init dataset & dataloader\n        data_t = getattr(loader, config.dataset.name)  # dataset type\n        self.data_t = data_t\n        p_dataset = data_t(root=config.dataset.root, mode='pred', config=config)\n        self.p_loader = DataLoader(\n            p_dataset, batch_size=config.inference.batch_size, shuffle=False,\n            collate_fn=data_t.collate_fn, pin_memory=True, num_workers=config.inference.num_workers,\n        )\n\n        # init pipeline\n        fuse = Fuse(config, mode='inference')\n        self.fuse = fuse\n        detect = Detect(config, mode='inference', nc=len(p_dataset.classes), classes=p_dataset.classes, labels=p_dataset.labels)\n        self.detect = detect\n\n    @torch.inference_mode()\n    def run(self):\n        p_l = tqdm(self.p_loader, total=len(self.p_loader), ncols=80)\n        for sample in p_l:\n            sample = dict_to_device(sample, self.fuse.device)\n            # set description\n            p_l.set_description(f'infer {sample[\"name\"][0]} ({len(sample[\"name\"])} images)')\n            # f_net forward\n            fus = self.fuse.inference(ir=sample['ir'], vi=sample['vi'])\n            # recolor\n            if self.data_t.color and self.config.inference.grayscale is False:\n                fus = torch.cat([fus, sample['cbcr']], dim=1)\n                fus = ycbcr_to_rgb(fus)\n            # d_net forward\n            pred = self.detect.inference(fus)\n            # save images\n            self.data_t.pred_save(\n                fus, [self.save_dir / name for name in sample['name']],\n                shape=sample['shape'], pred=pred,\n                save_txt=self.config.inference.save_txt,\n            )\n"
  },
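  {
    "path": "docs/examples/run_infer_fd.py",
    "content": "# Hypothetical usage sketch, not part of the original repository: runs the joint\n# fusion + detection inference, which writes annotated images (and optional txt\n# labels) under save_dir/images and save_dir/labels. Paths are placeholders.\nfrom scripts import InferFD\n\ninfer = InferFD(config='config/default.yaml', save_dir='runs/fuse_detect')\ninfer.run()\n"
  },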
  {
    "path": "scripts/train_f.py",
    "content": "import argparse\nimport logging\nfrom functools import reduce\nfrom pathlib import Path\n\nimport torch\nimport wandb\nimport yaml\nfrom kornia.metrics import AverageMeter\nfrom torch import Tensor\nfrom torch.optim import AdamW, Adam, SGD\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nimport loader\nfrom config import from_dict, ConfigDict\nfrom pipeline.fuse import Fuse\nfrom tools.dict_to_device import dict_to_device\n\n\nclass TrainF:\n    def __init__(self, config: str | Path | ConfigDict, wandb_key: str):\n        # init logger\n        log_f = '%(asctime)s | %(filename)s[line:%(lineno)d] | %(levelname)s | %(message)s'\n        logging.basicConfig(level='INFO', format=log_f)\n        logging.info(f'TarDAL-v1 Training Script')\n\n        # init config\n        if isinstance(config, str) or isinstance(config, Path):\n            config = yaml.safe_load(Path(config).open('r'))\n            config = from_dict(config)  # convert dict to object\n        else:\n            config = config\n        self.config = config\n\n        # debug mode\n        if config.debug.fast_run:\n            logging.warning('fast run mode is on, only for debug!')\n\n        # wandb run\n        wandb.login(key=wandb_key)  # wandb api key\n        runs = wandb.init(project='TarDAL-v1', config=config, mode=config.debug.wandb_mode)\n        self.runs = runs\n\n        # init save folder\n        save_dir = Path(self.config.save_dir) / self.runs.id\n        save_dir.mkdir(parents=True, exist_ok=True)\n        self.save_dir = save_dir\n        logging.info(f'model weights will be saved to {str(save_dir)}')\n\n        # init pipeline\n        fuse = Fuse(config, mode='train')\n        self.fuse = fuse\n\n        # freeze & grad\n        for k, v in fuse.generator.named_parameters():\n            v.requires_grad = True  # train all layers\n            if any(x in k for x in config.train.freeze):\n                logging.info(f'freezing {k}')\n                v.requires_grad = False\n\n        # init optimizer\n        o_cfg = config.optimizer\n        fuse_pg = fuse.param_groups()  # [weight(with decay), weight(no decay), bias]\n        groups = [\n            {'params': fuse_pg[0], 'lr': o_cfg.lr_i, 'weight_decay': o_cfg.weight_decay},\n            {'params': fuse_pg[1], 'lr': o_cfg.lr_i, 'weight_decay': 0},\n        ]\n        match o_cfg.name:\n            case 'sgd':\n                optimizer = SGD(fuse_pg[2], lr=o_cfg.lr_i, momentum=o_cfg.momentum, nesterov=True)\n            case 'adam':\n                optimizer = Adam(fuse_pg[2], lr=o_cfg.lr_i, betas=(o_cfg.momentum, 0.999))\n            case 'adamw':\n                optimizer = AdamW(fuse_pg[2], lr=o_cfg.lr_i, betas=(o_cfg.momentum, 0.999), weight_decay=0)\n            case _:\n                optimizer = None\n                assert NotImplemented, f'unsupported optimizer: {o_cfg.name}'\n        self.optimizer = optimizer\n        self.optimizer.add_param_group(groups[0])\n        self.optimizer.add_param_group(groups[1])\n\n        # init scheduler\n        lr_fn = lambda x: (1 - x / config.train.epochs) * (1 - o_cfg.lr_f) + o_cfg.lr_f\n        self.scheduler = LambdaLR(self.optimizer, lr_lambda=lr_fn)\n\n        # init dataset & dataloader\n        data_t = getattr(loader, config.dataset.name)  # dataset type\n        t_dataset = data_t(root=config.dataset.root, mode='train', config=config)\n        v_dataset = data_t(root=config.dataset.root, mode='val', 
config=config)\n        self.t_loader = DataLoader(\n            t_dataset, batch_size=config.train.batch_size, shuffle=True,\n            collate_fn=data_t.collate_fn, pin_memory=True, num_workers=config.train.num_workers,\n        )\n        self.v_loader = DataLoader(\n            v_dataset, batch_size=config.train.batch_size,\n            collate_fn=data_t.collate_fn, pin_memory=True, num_workers=config.train.num_workers,\n        )\n\n    def run(self):\n        # epochs & eval interval & save interval\n        epochs = self.config.train.epochs\n        e_interval = self.config.train.eval_interval\n        s_interval = self.config.train.save_interval\n        # start training process\n        for epoch in range(1, epochs + 1):\n            # train\n            t_l = tqdm(self.t_loader, disable=False, total=len(self.t_loader) if not self.config.debug.fast_run else 3, ncols=120)\n            g_history = [AverageMeter() for _ in range(5)]  # tot, src, adv, tar, det\n            disc_history = AverageMeter(), AverageMeter()  # target, detail\n            log_dict = {}\n            for sample in t_l:\n                sample = dict_to_device(sample, self.fuse.device)\n                # train generator\n                g_loss, [src_l, adv_l, tar_l, det_l] = self.fuse.criterion_generator(\n                    ir=sample['ir'], vi=sample['vi'],\n                    mk=sample['mask'],\n                    w1=sample['ir_w'], w2=sample['vi_w'],\n                    d_warming=epoch <= self.config.loss.fuse.d_warm,\n                )\n                g_history[0].update(g_loss.item())\n                _ = [g_history[idx + 1].update(v) for idx, v in enumerate([src_l, adv_l, tar_l, det_l])]\n                self.optim(g_loss)\n                # train target discriminator\n                d_t_loss = self.fuse.criterion_dis_t(\n                    ir=sample['ir'], vi=sample['vi'],\n                    mk=sample['mask'],\n                )\n                disc_history[0].update(d_t_loss.item())\n                self.optim(d_t_loss)\n                # train detail discriminator\n                d_d_loss = self.fuse.criterion_dis_d(\n                    ir=sample['ir'], vi=sample['vi'],\n                    mk=sample['mask'],\n                )\n                disc_history[1].update(d_d_loss.item())\n                self.optim(d_d_loss)\n                # fast run (jump out)\n                if self.config.debug.fast_run and t_l.n > 2:\n                    logging.info('fast mode: jump')\n                    break\n            # train logs\n            g_l, src_l, adv_l, tar_l, det_l = [g_history[i].avg for i in range(5)]\n            d_t_l, d_d_l = disc_history[0].avg, disc_history[1].avg\n            log_dict |= {'g/tot': g_l, 'g/src': src_l, 'g/adv': adv_l, 'g/tar': d_t_l, 'g/det': d_d_l, 'disc/tar': tar_l, 'disc/det': det_l}\n            logging.info(f'Epoch {epoch}/{epochs} | Generator Loss: {g_l:.4f} | Source Loss: {src_l:.4f} | Adversarial Loss: {adv_l:.4f}')\n\n            # eval (fuse: show in wandb)\n            if epoch % e_interval == 0 or self.config.debug.fast_run:\n                e_l = tqdm(self.v_loader, disable=True)\n                for sample in e_l:\n                    sample = dict_to_device(sample, self.fuse.device)\n                    fus = self.fuse.eval(ir=sample['ir'], vi=sample['vi'])\n                    log_dict |= {'fuse': wandb.Image(fus), 'mask': wandb.Image(sample['mask'])}\n                    break\n            # update scheduler and show lr\n            log_dict |= 
reduce(lambda x, y: x | y, [{f'lr_{i}': v['lr']} for i, v in enumerate(self.optimizer.param_groups)])\n            self.scheduler.step()\n\n            # update wandb\n            self.runs.log(log_dict)\n            # save model\n            if epoch % s_interval == 0 or self.config.debug.fast_run:\n                ckpt = self.fuse.save_ckpt()\n                torch.save(ckpt, self.save_dir / f'{str(epoch).zfill(5)}.pth')\n                logging.info(f'Epoch {epoch}/{epochs} | Model Saved')\n\n    def optim(self, loss: Tensor):\n        self.optimizer.zero_grad()\n        loss.backward()\n        self.optimizer.step()\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', default='config/default.yaml', help='config file path')\n    parser.add_argument('--auth', help='wandb auth api key')\n    args = parser.parse_args()\n    train = TrainF(args.cfg, args.auth)\n    train.run()\n"
  },
  {
    "path": "scripts/train_fd.py",
    "content": "import argparse\nimport logging\nimport sys\nfrom functools import reduce\nfrom itertools import chain\nfrom pathlib import Path\n\nimport numpy\nimport torch\nimport wandb\nimport yaml\nfrom kornia.color import ycbcr_to_rgb\nfrom kornia.metrics import AverageMeter\nfrom torch.nn.utils import clip_grad_norm_\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nimport loader\nfrom config import from_dict, ConfigDict\nfrom module.detect.utils.metrics import ap_per_class\nfrom pipeline.detect import Detect\nfrom pipeline.fuse import Fuse\nfrom scripts.utils.smart_optimizer import smart_optimizer\nfrom tools.dict_to_device import dict_to_device\n\n\nclass TrainFD:\n    def __init__(self, config: str | Path | ConfigDict, wandb_key: str):\n        # init logger\n        log_f = '%(asctime)s | %(filename)s[line:%(lineno)d] | %(levelname)s | %(message)s'\n        logging.basicConfig(level='INFO', format=log_f)\n        logging.info(f'TarDAL-v1 Training Script')\n\n        # init config\n        if isinstance(config, str) or isinstance(config, Path):\n            config = yaml.safe_load(Path(config).open('r'))\n            config = from_dict(config)  # convert dict to object\n        else:\n            config = config\n        self.config = config\n\n        # debug mode\n        if config.debug.fast_run:\n            logging.warning('fast run mode is on, only for debug!')\n\n        # wandb run\n        wandb.login(key=wandb_key)  # wandb api key\n        runs = wandb.init(project='TarDAL-v1', config=config, mode=config.debug.wandb_mode)\n        self.runs = runs\n\n        # init save folder\n        save_dir = Path(config.save_dir) / runs.id\n        save_dir.mkdir(parents=True, exist_ok=True)\n        self.save_dir = save_dir\n        logging.info(f'model weights will be saved to {str(save_dir)}')\n\n        # load dataset\n        data_t = getattr(loader, config.dataset.name)\n        self.data_t = data_t\n        t_dataset = data_t(root=config.dataset.root, mode='train', config=config)\n        v_dataset = data_t(root=config.dataset.root, mode='val', config=config)\n        if 'detect' not in t_dataset.type:\n            logging.fatal(f'dataset {config.dataset.name} not support detect')\n            sys.exit(1)\n        self.t_loader = DataLoader(\n            t_dataset, batch_size=config.train.batch_size, shuffle=True,\n            collate_fn=data_t.collate_fn, pin_memory=True, num_workers=config.train.num_workers,\n        )\n        self.v_loader = DataLoader(\n            v_dataset, batch_size=config.train.batch_size,\n            collate_fn=data_t.collate_fn, pin_memory=True, num_workers=config.train.num_workers,\n        )\n\n        # init pipeline\n        fuse = Fuse(config, mode='train')\n        self.fuse = fuse\n        detect = Detect(config, mode='train', nc=len(t_dataset.classes), classes=t_dataset.classes, labels=t_dataset.labels)\n        self.detect = detect\n\n        # freeze & grad\n        for k, v in chain(fuse.generator.named_parameters(), detect.net.named_parameters()):\n            v.requires_grad = True  # train all layers\n            if any(x in k for x in config.train.freeze):\n                logging.info(f'freezing {k}')\n                v.requires_grad = False\n\n        # init fuse optimizer\n        o_cfg = config.optimizer\n        f_p, d_p = fuse.param_groups('g'), detect.param_groups()\n        self.fd_opt = smart_optimizer(o_cfg, tuple(f_p[i] + d_p[i] for i in range(3)))\n       
        self.disc_opt = smart_optimizer(o_cfg, fuse.param_groups('d'), lr=o_cfg.lr_d)\n\n        # init scheduler\n        lr_fn = lambda x: (1 - x / config.train.epochs) * (1 - o_cfg.lr_f) + o_cfg.lr_f\n        self.lr_fn = lr_fn\n        self.scheduler = LambdaLR(self.fd_opt, lr_lambda=lr_fn)\n\n        # hyperparameters check\n        # bridge warm & scheduler warm phase.0\n        if config.loss.bridge.warm != config.scheduler.warmup_epochs[0]:\n            logging.warning(f'overwriting bridge warm {config.loss.bridge.warm} with {config.scheduler.warmup_epochs[0]}')\n            config.loss.bridge.warm = config.scheduler.warmup_epochs[0]\n        # discriminator warm & bridge warm\n        if config.loss.fuse.d_warm >= config.loss.bridge.warm / 2:\n            logging.warning(f'overwriting discriminator warm {config.loss.fuse.d_warm} with {round(config.loss.bridge.warm / 2)}')\n            config.loss.fuse.d_warm = round(config.loss.bridge.warm / 2)\n\n    def run(self):\n        # epochs & eval interval & save interval\n        epochs = self.config.train.epochs\n        e_interval = self.config.train.eval_interval\n        s_interval = self.config.train.save_interval\n        # history switch\n        best_map = -1\n\n        # start training process\n        l_opt_shot = -1\n        n_batch_size = 64\n        accumulate = max(round(n_batch_size / self.config.train.batch_size), 1)\n        for epoch in range(1, epochs + 1):\n            # train\n            t_l = tqdm(self.t_loader, disable=False, total=len(self.t_loader) if not self.config.debug.fast_run else 3, ncols=120)\n            # recorder\n            g_history = AverageMeter()  # generator total loss\n            f_history = [AverageMeter() for _ in range(5)]  # fuse loss: tot, src, adv, tar, det\n            d_history = [AverageMeter() for _ in range(4)]  # detect loss: tot, box, obj, cls\n            disc_history = AverageMeter(), AverageMeter()  # discriminator loss: target, detail\n            log_dict = {}\n            # warm up shots, max(warmup_epochs, 100 shots)\n            w_config = self.config.scheduler\n            w_shots_0 = max(round(w_config.warmup_epochs[0] * len(self.t_loader)), 100)  # bridge warm\n            w_shots_1 = max(round(w_config.warmup_epochs[1] * len(self.t_loader)), 100)  # normal warm\n            w_shots = (w_shots_0, w_shots_1)\n            # process\n            self.fd_opt.zero_grad()\n            for idx, sample in enumerate(t_l):\n                # warm up\n                c_shots = idx + len(self.t_loader) * (epoch - 1)\n                if c_shots < w_shots[0]:\n                    for jdx, x in enumerate(self.fd_opt.param_groups):\n                        x['lr'] = w_config.warmup_bias_lr if jdx == 0 else 0\n                        if 'momentum' in x:\n                            x['momentum'] = w_config.warmup_momentum\n                if w_shots[0] <= c_shots < w_shots[1]:\n                    x_shot = [w_shots[0], w_shots[1]]  # interpolate over warm-up phase.1\n                    # accumulate = max(1, numpy.interp(c_shots, x_shot, [1, n_batch_size / self.config.train.batch_size]).round())\n                    for jdx, x in enumerate(self.fd_opt.param_groups):\n                        o_config = self.config.optimizer\n                        # bias lr falls from 0.1 to lr_i, all other lrs rise from 0.0 to lr_i\n                        w_range = [w_config.warmup_bias_lr if jdx == 0 else 0, x['initial_lr'] * self.lr_fn(epoch - 1)]\n                        x['lr'] = numpy.interp(c_shots, x_shot, w_range)\n
                        if 'momentum' in x:\n                            x['momentum'] = numpy.interp(c_shots, x_shot, [w_config.warmup_momentum, o_config.momentum])\n                lr_s = [x['lr'] for x in self.fd_opt.param_groups]\n                logging.debug(f'adjust lr {lr_s[0]:.6f} {lr_s[1]:.6f} {lr_s[2]:.6f}')\n\n                # forward\n                sample = dict_to_device(sample, self.fuse.device)\n\n                # train generator\n                # ir & vi -> f_net -> fus -> d_net -> obj\n                # loss: fus -> src + adv, obj -> ground truth\n                # f_net forward and cal loss\n                f_loss, [src_l, adv_l, tar_l, det_l] = self.fuse.criterion_generator(\n                    ir=sample['ir'], vi=sample['vi'],\n                    mk=sample['mask'],\n                    w1=sample['ir_w'], w2=sample['vi_w'],\n                    d_warming=epoch <= self.config.loss.fuse.d_warm,\n                )\n                fus = self.fuse.eval(ir=sample['ir'], vi=sample['vi'])\n                if epoch <= self.config.loss.bridge.warm:\n                    fus.detach_()  # (det -> det, fuse -> fuse, det no-> fuse)\n                # recolor\n                if self.data_t.color:\n                    fus = torch.cat([fus, sample['cbcr']], dim=1)\n                    fus = ycbcr_to_rgb(fus)\n                # d_net forward and cal loss\n                d_loss, [box_l, obj_l, cls_l] = self.detect.criterion(\n                    imgs=fus,\n                    targets=sample['labels'],\n                )\n                # merge loss\n                b_c = self.config.loss.bridge\n                g_loss = b_c['fuse'] * f_loss + b_c['detect'] * d_loss  # generator total loss\n                g_history.update(g_loss.item())\n                _ = [f_history[idx].update(v) for idx, v in enumerate([f_loss.item(), src_l, adv_l, tar_l, det_l])]\n                _ = [d_history[idx].update(v) for idx, v in enumerate([d_loss.item(), box_l, obj_l, cls_l])]\n                # optimize\n                g_loss.backward()\n                if c_shots - l_opt_shot >= accumulate:\n                    clip_grad_norm_(chain(self.fuse.generator.parameters(), self.detect.net.parameters()), max_norm=10.0)\n                    self.fd_opt.step()\n                    self.fd_opt.zero_grad()\n                    l_opt_shot = c_shots\n                    logging.debug(f'optimize f+d | shots: {c_shots} | accumulate: {accumulate} | last: {l_opt_shot}')\n\n                # train target discriminator\n                d_t_loss = self.fuse.criterion_dis_t(\n                    ir=sample['ir'], vi=sample['vi'],\n                    mk=sample['mask'],\n                )\n                disc_history[0].update(d_t_loss.item())\n                self.disc_opt.zero_grad()\n                d_t_loss.backward()\n                self.disc_opt.step()\n\n                # train detail discriminator\n                d_d_loss = self.fuse.criterion_dis_d(\n                    ir=sample['ir'], vi=sample['vi'],\n                    mk=sample['mask'],\n                )\n                disc_history[1].update(d_d_loss.item())\n                self.disc_opt.zero_grad()\n                d_d_loss.backward()\n                self.disc_opt.step()\n\n                # update description\n                t_l.set_description(f'{epoch}/{epochs} | g: {g_history.avg:.4f} | f: {f_history[0].avg:.4f} | d: {d_history[0].avg:.4f}')\n\n                # fast run (jump out)\n                if self.config.debug.fast_run and t_l.n > 2:\n
                    logging.info('fast mode: jump')\n                    break\n\n            # train logs\n            # fuse loss\n            f_l, src_l, adv_l, tar_l, det_l = [f_history[idx].avg for idx in range(5)]\n            log_dict |= {'fus/tot': f_l, 'fus/src': src_l, 'fus/adv': adv_l, 'fus/tar': tar_l, 'fus/det': det_l}\n            # detect loss\n            d_l, box_l, obj_l, cls_l = [d_history[idx].avg for idx in range(4)]\n            log_dict |= {'det/tot': d_l, 'det/box': box_l, 'det/obj': obj_l, 'det/cls': cls_l}\n            # generator loss\n            g_l = g_history.avg\n            log_dict |= {'gen/tot': g_l, 'gen/fus': f_l, 'gen/det': d_l}\n            # discriminator loss\n            d_t_l, d_d_l = [disc_history[idx].avg for idx in range(2)]\n            log_dict |= {'disc/tar': d_t_l, 'disc/det': d_d_l}\n            # learning rate\n            lrs = [x['lr'] for x in self.fd_opt.param_groups]\n            log_dict |= {'lr/0': lrs[0], 'lr/1': lrs[1], 'lr/2': lrs[2]}\n            # log to console\n            logging.info(f'Epoch {epoch}/{epochs} | Generator Loss: {g_l:.4f} | Fuse loss: {f_l:.4f} | Detect loss: {d_l:.4f}')\n\n            # update scheduler\n            self.scheduler.step()\n\n            # eval (fuse & detect: print result in wandb)\n            if epoch % e_interval == 0 or self.config.debug.fast_run:\n                e_l = tqdm(self.v_loader, disable=False, total=len(self.v_loader) if not self.config.debug.fast_run else 3, ncols=120)\n\n                # matrix\n                seen = 0\n                dt, p, r, f1, mp, mr, map50, map_all = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n                j_dict, stats, ap50, ap, ap_class = [], [], [], [], []\n\n                # process\n                for sample in e_l:\n                    sample = dict_to_device(sample, self.fuse.device)\n                    # f_net\n                    fus = self.fuse.eval(ir=sample['ir'], vi=sample['vi'])\n                    # recolor\n                    if self.data_t.color:\n                        fus = torch.cat([fus, sample['cbcr']], dim=1)\n                        fus = ycbcr_to_rgb(fus)\n                    # d_net\n                    seen_x, preview = self.detect.eval(imgs=fus, targets=sample['labels'], stats=stats, preview='detect' not in log_dict)\n                    seen += seen_x\n                    if preview is not None and 'detect' not in log_dict:\n                        log_dict |= {'detect': wandb.Image(preview)}\n                    # fast run (jump out)\n                    if self.config.debug.fast_run and e_l.n > 2:\n                        logging.info('fast mode: jump')\n                        break\n\n                # compute statistics\n                stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]\n                names = reduce(lambda x, y: x | y, [{idx: name} for idx, name in enumerate(self.data_t.classes)])\n                if len(stats) and stats[0].any():\n                    tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, names=names)\n                    ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95\n                    mp, mr, map50, map_all = p.mean(), r.mean(), ap50.mean(), ap.mean()\n                # number of targets per class (guard against an empty stats list)\n                num_t = numpy.bincount(stats[3].astype(int), minlength=len(self.data_t.classes)) if len(stats) else numpy.zeros(len(self.data_t.classes), dtype=int)\n                if num_t.sum() == 0:\n                    logging.warning('no labels found, cannot compute metrics without labels.')\n\n                # eval logs\n
                log_dict |= {'eval/precision': mp, 'eval/recall': mr, 'eval/map50': map50, 'eval/map': map_all}\n                # log to console (per class)\n                logging.info(f'Epoch {epoch}/{epochs} | Precision: {mp:.4f} | Recall: {mr:.4f} | mAP50: {map50:.4f} | mAP: {map_all:.4f}')\n                if len(stats) and len(self.data_t.classes) > 1:\n                    for i, c in enumerate(ap_class):\n                        logging.info(\n                            f'{names[c]} | tot: {num_t[c]} | p: {p[i]:.4f} | r: {r[i]:.4f} | ap50: {ap50[i]:.4f} | ap: {ap[i]:.4f}'\n                        )\n\n                # mark best\n                if map_all > best_map:\n                    best_map = map_all\n                    Path(self.save_dir / 'meta.txt').write_text(f'best_map: {best_map:.4f} | epoch: {epoch}')\n                    ckpt = self.fuse.save_ckpt() | self.detect.save_ckpt()\n                    torch.save(ckpt, self.save_dir / f'{str(epoch).zfill(5)}-{best_map:.4f}.pth')\n\n            # update wandb\n            self.runs.log(log_dict)\n            # save model\n            if epoch % s_interval == 0 or self.config.debug.fast_run:\n                ckpt = self.fuse.save_ckpt() | self.detect.save_ckpt()\n                torch.save(ckpt, self.save_dir / f'{str(epoch).zfill(5)}.pth')\n                logging.info(f'Epoch {epoch}/{epochs} | Model Saved')\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', default='config/default.yaml', help='config file path')\n    parser.add_argument('--auth', help='wandb auth api key')\n    args = parser.parse_args()\n    train = TrainFD(args.cfg, args.auth)\n    train.run()\n"
  },
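  {
    "path": "scripts/utils/grad_accumulate_sketch.py",
    "content": "\"\"\"\nIllustrative sketch (not part of the training pipeline): isolates the gradient accumulation scheme from TrainFD.run on a toy model.\nLosses are backpropagated every shot, but the optimizer only steps once `accumulate` backward passes have piled up, emulating a nominal batch size of 64 with a smaller physical batch.\nThe model, data, and batch size below are made-up placeholders.\n\"\"\"\nimport torch\nfrom torch import nn\n\nif __name__ == '__main__':\n    batch_size = 16  # placeholder physical batch size\n    n_batch_size = 64  # nominal batch size, as in TrainFD.run\n    accumulate = max(round(n_batch_size / batch_size), 1)\n    net = nn.Linear(8, 1)\n    opt = torch.optim.SGD(net.parameters(), lr=1e-2)\n    l_opt_shot = -1\n    opt.zero_grad()\n    for c_shots in range(12):\n        x, y = torch.randn(batch_size, 8), torch.randn(batch_size, 1)\n        loss = nn.functional.mse_loss(net(x), y)\n        loss.backward()  # gradients pile up across shots\n        if c_shots - l_opt_shot >= accumulate:\n            opt.step()\n            opt.zero_grad()\n            l_opt_shot = c_shots\n            print(f'optimizer step at shot {c_shots}')\n"
  },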
  {
    "path": "scripts/utils/smart_optimizer.py",
    "content": "from typing import Tuple, List, Optional\n\nfrom torch.optim import Optimizer, AdamW, Adam, SGD\n\nfrom config import ConfigDict\n\n\ndef smart_optimizer(config: ConfigDict, param_group: Tuple[List, List, List], lr: Optional[float] = None) -> Optimizer:\n    if lr is not None:\n        config.lr_i = lr\n    groups = [\n        {'params': param_group[0], 'lr': config.lr_i, 'weight_decay': config.weight_decay},\n        {'params': param_group[1], 'lr': config.lr_i, 'weight_decay': 0},\n    ]\n    match config.name:\n        case 'sgd':\n            opt = SGD(param_group[2], lr=config.lr_i, momentum=config.momentum, nesterov=True)\n        case 'adam':\n            opt = Adam(param_group[2], lr=config.lr_i, betas=(config.momentum, 0.999))\n        case 'adamw':\n            opt = AdamW(param_group[2], lr=config.lr_i, betas=(config.momentum, 0.999), weight_decay=0)\n        case _:\n            opt = None\n            assert NotImplemented, f'unsupported optimizer: {config.name}'\n    opt.add_param_group(groups[0])\n    opt.add_param_group(groups[1])\n    return opt\n"
  },
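  {
    "path": "scripts/utils/smart_optimizer_example.py",
    "content": "\"\"\"\nIllustrative usage sketch for smart_optimizer, not part of the pipeline.\nIt assumes only attribute-style access on the config object, so a SimpleNamespace stands in for ConfigDict here.\nThe grouping convention (weights with decay, no-decay weights, biases) mirrors how the fuse/detect param groups are consumed; the toy network and hyperparameters are placeholders.\n\"\"\"\nfrom types import SimpleNamespace\n\nfrom torch import nn\n\nfrom scripts.utils.smart_optimizer import smart_optimizer\n\n\ndef split_param_groups(model: nn.Module):\n    # (weights with decay, norm weights without decay, biases)\n    decay, no_decay, bias = [], [], []\n    for module in model.modules():\n        for name, p in module.named_parameters(recurse=False):\n            if name == 'bias':\n                bias.append(p)\n            elif isinstance(module, nn.BatchNorm2d):\n                no_decay.append(p)\n            else:\n                decay.append(p)\n    return decay, no_decay, bias\n\n\nif __name__ == '__main__':\n    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.Conv2d(8, 8, 3))\n    cfg = SimpleNamespace(name='sgd', lr_i=1e-2, momentum=0.9, weight_decay=5e-4)\n    opt = smart_optimizer(cfg, split_param_groups(net))\n    # param_groups[0] holds the biases (registered first), then decay / no-decay groups\n    print([len(g['params']) for g in opt.param_groups])\n"
  },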
  {
    "path": "tools/choose_images.py",
    "content": "from functools import reduce\nfrom pathlib import Path\nfrom typing import Literal\n\nimport cv2\nimport numpy\n\n\ndef choose_images(root: str | Path, mode: str = Literal['train', 'val', 'pred']):\n    root = Path(root)\n    names = [x.name for x in sorted(root.glob('ir/*')) if x.suffix in ['.png', '.jpg', '.bmp']]\n    save = []\n    for name in names:\n        x = cv2.imread(str(root / 'ir' / name), cv2.IMREAD_GRAYSCALE)\n        y = cv2.imread(str(root / 'vi' / name), cv2.IMREAD_GRAYSCALE)\n        t = numpy.hstack([x, y])\n        cv2.imshow(name, t)\n        if cv2.waitKey() == ord('s'):\n            save.append(name)\n        cv2.destroyWindow(name)\n    meta = root / 'meta'\n    meta.mkdir(parents=True, exist_ok=True)\n    meta_f = meta / f'{mode}.txt'\n    meta_f.write_text(reduce(lambda i, j: i + j, [t + '\\n' for t in save]))\n\n\nif __name__ == '__main__':\n    choose_images('data/tno', mode='train')\n"
  },
  {
    "path": "tools/convert_to_png.py",
    "content": "import argparse\nimport logging\nfrom pathlib import Path\n\nimport cv2\nfrom tqdm import tqdm\n\n\ndef convert_to_png(src: str | Path, color: bool):\n    img_list = [x for x in Path(src).rglob('*') if x.suffix in ['.bmp', '.jpg', '.tiff']]\n    process = tqdm(sorted(img_list))\n    for o_path in process:\n        n_path = o_path.with_suffix('.png')\n        process.set_description(f'convert {o_path.name} to {n_path.name}')\n        img = cv2.imread(str(o_path), cv2.IMREAD_COLOR if color else cv2.IMREAD_GRAYSCALE)\n        cv2.imwrite(str(n_path), img)\n        o_path.unlink()\n\n\nif __name__ == '__main__':\n    logging.basicConfig(level='DEBUG')\n    parser = argparse.ArgumentParser('convert to png')\n    parser.add_argument('--src', help='folder need to be converted')\n    parser.add_argument('--color', action='store_true', help='use color mode (recommend on for vis, off for ir)')\n    config = parser.parse_args()\n    convert_to_png(**vars(config))\n"
  },
  {
    "path": "tools/data_preview.py",
    "content": "import argparse\nfrom pathlib import Path\nfrom typing import Optional\n\nimport cv2\nimport torch\nfrom kornia import image_to_tensor, tensor_to_image\nfrom torch import Tensor\nfrom torchvision.utils import draw_bounding_boxes\nfrom tqdm import tqdm\n\nimport loader\nfrom loader.utils.reader import label_read\n\n\ndef data_preview(img_f: str | Path, lbl_f: str | Path, dst_f: str | Path, dataset: Optional[str] = None):\n    # create dst\n    dst_f = Path(dst_f)\n    dst_f.mkdir(parents=True, exist_ok=True)\n    # images list\n    img_f, lbl_f = Path(img_f), Path(lbl_f)\n    img_l = sorted([x.stem for x in img_f.glob('*.png')])\n    # dataset settings\n    classes, palette = [], []\n    if dataset is not None:\n        dataset = getattr(loader, dataset)\n        classes = dataset.classes\n        palette = dataset.palette\n    t_l = tqdm(img_l)\n    for stem in t_l:\n        t_l.set_description(f'draw on {stem}')\n        lbl = label_read(lbl_f / f'{stem}.txt')\n        img = image_to_tensor(cv2.imread(str(img_f / f'{stem}.png')))\n        lbl[:, 1:] *= Tensor([img.shape[-1], img.shape[-2], img.shape[-1], img.shape[-2]])\n        boxes = [x[1:] for x in lbl]\n        if dataset is not None:\n            cls = [classes[int(x[0])] for x in lbl]\n            colors = [palette[int(x[0])] for x in lbl]\n            img = draw_bounding_boxes(img, torch.stack(boxes, dim=0), cls, colors, width=3)\n        else:\n            img = draw_bounding_boxes(img, torch.stack(boxes, dim=0), width=3)\n        cv2.imwrite(str(dst_f / f'{stem}.png'), tensor_to_image(img))\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser('data preview')\n    parser.add_argument('--img', help='image folder')\n    parser.add_argument('--lbl', help='label folder')\n    parser.add_argument('--dst', help='mask output folder (we will create it if not exists)')\n    parser.add_argument('--cls', required=False, help='dataset type (random if not specified)')\n    config = parser.parse_args()\n    data_preview(img_f=config.img, lbl_f=config.lbl, dst_f=config.dst, dataset=config.cls)\n"
  },
  {
    "path": "tools/dict_to_device.py",
    "content": "from typing import Dict\n\nfrom torch import Tensor\nfrom torch.types import Device\n\n\ndef dict_to_device(d: Dict, device: Device) -> Dict | None:\n    if d is None:\n        return None\n    for k, v in d.items():\n        if isinstance(v, Tensor):\n            d[k] = d[k].to(device)\n    return d\n"
  },
  {
    "path": "tools/environment_probe.py",
    "content": "import logging\nimport sys\n\nimport torch\n\n\nclass EnvironmentProbe:\n    \"\"\"\n    Detects the configuration of the environment and returns devices status.\n    \"\"\"\n\n    def __init__(self):\n        python_v = sys.version.split()[0]\n        pytorch_v = torch.__version__\n        cuda_s = torch.cuda.is_available()\n        device = torch.cuda.current_device() if cuda_s else 'cpu'\n        device_n = torch.cuda.get_device_name(device)\n        logging.info(f'python: {python_v} | pytorch: {pytorch_v} | gpu: {device_n if cuda_s else False}')\n        self.device = device\n\n    def memory_status(self):\n        if not torch.cuda.is_available():\n            return {'current': 'unavailable', 'max': 'unavailable'}\n        memory_a = torch.cuda.memory_allocated(self.device) / 1024 ** 3\n        memory_ma = torch.cuda.max_memory_allocated(self.device) / 1024 ** 3\n        logging.debug(f'memory: {memory_a:.2f}GB (history max: {memory_ma:.2f}GB)')\n        return {'current': memory_a, 'max': memory_ma}\n\n\nif __name__ == '__main__':\n    logging.basicConfig(level=logging.DEBUG)\n    probe = EnvironmentProbe()\n    probe.memory_status()\n"
  },
  {
    "path": "tools/generate_mask.py",
    "content": "import argparse\nimport logging\n\nfrom pipeline.saliency import Saliency\n\n\ndef generate_mask(url: str, src: str, dst: str):\n    saliency = Saliency(url=url)\n    saliency.inference(src=src, dst=dst)\n\n\nif __name__ == '__main__':\n    logging.basicConfig(level='INFO')\n    default_url = 'https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/u2netp.pth'\n    parser = argparse.ArgumentParser('mask generator')\n    parser.add_argument('--url', default=default_url, help='checkpoint url')\n    parser.add_argument('--src', help='folder need to be detected')\n    parser.add_argument('--dst', help='mask output folder (we will create it if not exists)')\n    config = parser.parse_args()\n    generate_mask(**vars(config))\n"
  },
  {
    "path": "tools/scenario_reader.py",
    "content": "import json\nimport logging\nfrom functools import reduce\nfrom pathlib import Path\n\n\ndef scenario_counter(src: str | Path):\n    # read scenario from json file\n    src = Path(src)\n    scenarios = json.load(open(src, 'r'))\n    # output as tree format\n    logging.debug(f'total scenarios: {len(scenarios)}')\n    tot_t_frame, tot_v_frame = 0, 0\n    for scenario in scenarios:\n        t_frame, v_frame = 0, 0\n        frame_buf = []\n        # count frame\n        for scene in scenario['scene']:\n            frame = 0\n            for fr in scene['range']:\n                frame += fr['max'] - fr['min'] + 1\n            frame_buf.append(f' | -- {scene[\"name\"]} (frame: {frame}, mode: {scene[\"mode\"]})')\n            if scene['mode'] == 'train':\n                t_frame += frame\n            else:\n                v_frame += frame\n        # output\n        logging.debug(f'-- {scenario[\"name\"]} (scenes: {len(scenario[\"scene\"])}, train: {t_frame}, val: {v_frame})')\n        _ = [logging.debug(x) for x in frame_buf]\n        tot_t_frame += t_frame\n        tot_v_frame += v_frame\n    logging.debug(f'total train frame: {tot_t_frame}, total val frame: {tot_v_frame}')\n\n\ndef generate_meta(root: str | Path):\n    root = Path(root)\n    # read scenario from json file\n    t_frame, v_frame = [], []\n    scenarios = json.load((root / 'meta' / 'scenario.json').open('r'))\n    # count frame\n    for scenario in scenarios:\n        for scene in scenario['scene']:\n            for fr in scene['range']:\n                frame = list(range(fr['min'], fr['max'] + 1))\n                if scene['mode'] == 'train':\n                    t_frame += frame\n                else:\n                    v_frame += frame\n    # sort by index\n    t_frame.sort()\n    v_frame.sort()\n    # write to file\n    (root / 'meta' / 'train.txt').write_text(reduce(lambda i, j: i + j, [f'{str(x).zfill(5)}.png\\n' for x in t_frame]))\n    (root / 'meta' / 'val.txt').write_text(reduce(lambda i, j: i + j, [f'{str(x).zfill(5)}.png\\n' for x in v_frame]))\n    # total frame\n    logging.info(f'total train frame: {len(t_frame)}, total val frame: {len(v_frame)}')\n\n\nif __name__ == '__main__':\n    scenario_counter('data/m3fd/meta/scenario.json')\n    generate_meta('data/m3fd')\n"
  },
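  {
    "path": "tools/scenario_example.py",
    "content": "\"\"\"\nIllustrative sketch (not shipped with any dataset): writes a minimal meta/scenario.json\nin the structure scenario_reader.py expects (scenario -> scenes -> inclusive frame\nranges plus a train/val mode), then runs scenario_counter and generate_meta against it.\nThe scenario and scene names below are placeholders.\n\"\"\"\nimport json\nimport logging\nimport tempfile\nfrom pathlib import Path\n\nfrom tools.scenario_reader import generate_meta, scenario_counter\n\nif __name__ == '__main__':\n    logging.basicConfig(level='DEBUG')\n    root = Path(tempfile.mkdtemp())\n    (root / 'meta').mkdir(parents=True, exist_ok=True)\n    scenarios = [\n        {\n            'name': 'daytime',  # placeholder scenario name\n            'scene': [\n                {'name': 'street', 'mode': 'train', 'range': [{'min': 0, 'max': 9}]},\n                {'name': 'campus', 'mode': 'val', 'range': [{'min': 10, 'max': 14}]},\n            ],\n        },\n    ]\n    (root / 'meta' / 'scenario.json').write_text(json.dumps(scenarios, indent=2))\n    scenario_counter(root / 'meta' / 'scenario.json')\n    generate_meta(root)  # writes meta/train.txt and meta/val.txt\n"
  },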
  {
    "path": "train.py",
    "content": "import argparse\nimport logging\nfrom pathlib import Path\n\nimport torch.backends.cudnn\nimport yaml\n\nimport scripts\nfrom config import from_dict\n\nif __name__ == '__main__':\n    # args parser\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--cfg', default='config/default.yaml', help='config file path')\n    parser.add_argument('--auth', help='wandb auth api key')\n    args = parser.parse_args()\n\n    # init config\n    config = yaml.safe_load(Path(args.cfg).open('r'))\n    config = from_dict(config)  # convert dict to object\n    config = config\n\n    # init logger\n    log_f = '%(asctime)s | %(filename)s[line:%(lineno)d] | %(levelname)s | %(message)s'\n    logging.basicConfig(level=config.debug.log, format=log_f)\n\n    # init device & anomaly detector\n    torch.backends.cudnn.benchmark = True\n    torch.autograd.set_detect_anomaly(True)\n\n    # choose train script\n    logging.info(f'enter {config.strategy} train mode')\n    match config.strategy:\n        case 'fuse':\n            train_p = getattr(scripts, 'TrainF')\n        case 'detect':\n            if config.loss.bridge.fuse != 0:\n                logging.warning('overwrite fuse loss weight to 0')\n                config.loss.bridge.fuse = 0\n            train_p = getattr(scripts, 'TrainFD')\n        case 'fuse & detect':\n            train_p = getattr(scripts, 'TrainFD')\n        case _:\n            raise ValueError(f'unknown strategy: {config.strategy}')\n\n    # create script instance\n    train = train_p(config, wandb_key=args.auth)\n    train.run()\n"
  },
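  {
    "path": "config/skeleton.example.yaml",
    "content": "# Illustrative skeleton only: the keys below are the ones read by train.py and\n# scripts/train_fd.py in this repository; values are placeholders, not official\n# defaults -- see config/default.yaml for the real configuration.\nstrategy: fuse & detect  # fuse | detect | fuse & detect\nsave_dir: runs/train\ndataset:\n  name: M3FD  # placeholder; loader class name, resolved via getattr(loader, name)\n  root: data/m3fd\ntrain:\n  epochs: 300\n  batch_size: 16\n  num_workers: 4\n  eval_interval: 10\n  save_interval: 10\n  freeze: []  # substrings of parameter names to freeze\noptimizer:\n  name: sgd  # sgd | adam | adamw\n  lr_i: 0.01  # initial lr\n  lr_f: 0.01  # final lr fraction for the linear schedule\n  lr_d: 0.01  # discriminator lr\n  momentum: 0.937\n  weight_decay: 0.0005\nscheduler:\n  warmup_epochs: [ 3, 6 ]  # phase.0 (bridge warm) end, phase.1 end\n  warmup_bias_lr: 0.1\n  warmup_momentum: 0.8\nloss:\n  bridge:\n    warm: 3\n    fuse: 1  # weight of the fuse loss in the merged generator loss\n    detect: 1  # weight of the detect loss in the merged generator loss\n  fuse:\n    d_warm: 1\ndebug:\n  log: INFO\n  fast_run: false\n  wandb_mode: offline\n"
  },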
  {
    "path": "tutorial.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"# TarDAL online tutorial | CVPR 2022\\n\",\n    \"\\n\",\n    \"This is the **official** TarDAL notebook, and is freely available for everyone.\\n\",\n    \"For more information please visit [GitHub Repository](https://github.com/JinyuanLiu-CV/TarDAL).\\n\",\n    \"Thank you!\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%% md\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"## Setup Environment\\n\",\n    \"\\n\",\n    \"Install requirements for TarDAL.\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%% md\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"!nvidia-smi  # check GPU environment\\n\",\n    \"!git clone https://github.com/JinyuanLiu-CV/TarDAL.git  # clone repository from GitHub\\n\",\n    \"\\n\",\n    \"# use python 3.10\\n\",\n    \"!wget https://github.com/korakot/kora/releases/download/v0.10/py310.sh\\n\",\n    \"!bash ./py310.sh -b -f -p /usr/local\\n\",\n    \"!python -m ipykernel install --name \\\"py310\\\" --user\\n\",\n    \"\\n\",\n    \"%cd TarDAL\\n\",\n    \"%pip install -r requirements.txt  # install tardal requirements\\n\",\n    \"%pip install -r module/detect/requirements.txt # install yolov5 requirements\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"## Fuse or Eval\\n\",\n    \"\\n\",\n    \"### Load Image (List)\\n\",\n    \"\\n\",\n    \"infrared image(s):\\n\",\n    \"![infrared](assets/sample/s1/ir/M3FD_00471.png)\\n\",\n    \"\\n\",\n    \"visible image(s):\\n\",\n    \"![visible](assets/sample/s1/vi/M3FD_00471.png)\\n\",\n    \"\\n\",\n    \"### Init TarDAL Pipeline\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"outputs\": [],\n   \"source\": [\n    \"from scripts import InferF\\n\",\n    \"from config import from_dict\\n\",\n    \"import yaml\\n\",\n    \"from pathlib import Path\\n\",\n    \"from IPython import display\\n\",\n    \"\\n\",\n    \"# init config\\n\",\n    \"config = yaml.safe_load(Path('config/official/colab.yaml').open('r'))\\n\",\n    \"config = from_dict(config)  # convert dict to object\\n\",\n    \"\\n\",\n    \"# init infer pipeline\\n\",\n    \"infer_p = InferF(config, save_dir='runs/sample/s1')\\n\",\n    \"\\n\",\n    \"# generate fusion sample\\n\",\n    \"infer_p.run()\\n\",\n    \"\\n\",\n    \"# display sample\\n\",\n    \"display.Image('runs/sample/s1/M3FD_00471.png')\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"is_executing\": true\n    }\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 2\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython2\",\n   \"version\": \"2.7.6\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}\n"
  }
]