Repository: dlut-dimt/TarDAL Branch: main Commit: 6a9edd744b44 Files: 121 Total size: 578.2 KB Directory structure: gitextract_5xl4bhv7/ ├── .github/ │ └── workflows/ │ └── sync.yml ├── .gitignore ├── CITATION.cff ├── LICENSE ├── README.md ├── assets/ │ └── sample/ │ └── s1/ │ └── meta/ │ └── pred.txt ├── config/ │ ├── __init__.py │ ├── default.yaml │ ├── exp/ │ │ ├── i-tardal-dt.yaml │ │ └── t-tardal-ct.yaml │ └── official/ │ ├── colab.yaml │ ├── infer/ │ │ ├── tardal-ct.yaml │ │ ├── tardal-dt.yaml │ │ └── tardal-tt.yaml │ └── train/ │ ├── tardal-ct.yaml │ ├── tardal-dt.yaml │ └── tardal-tt.yaml ├── data/ │ └── README.md ├── functions/ │ ├── __init__.py │ ├── div_loss.py │ └── get_param_groups.py ├── infer.py ├── loader/ │ ├── __init__.py │ ├── m3fd.py │ ├── roadscene.py │ ├── tno.py │ └── utils/ │ ├── __init__.py │ ├── checker.py │ └── reader.py ├── module/ │ ├── __init__.py │ ├── detect/ │ │ ├── README.md │ │ ├── models/ │ │ │ ├── __init__.py │ │ │ ├── common.py │ │ │ ├── experimental.py │ │ │ ├── hub/ │ │ │ │ ├── anchors.yaml │ │ │ │ ├── yolov3-spp.yaml │ │ │ │ ├── yolov3-tiny.yaml │ │ │ │ ├── yolov3.yaml │ │ │ │ ├── yolov5-bifpn.yaml │ │ │ │ ├── yolov5-fpn.yaml │ │ │ │ ├── yolov5-p2.yaml │ │ │ │ ├── yolov5-p34.yaml │ │ │ │ ├── yolov5-p6.yaml │ │ │ │ ├── yolov5-p7.yaml │ │ │ │ ├── yolov5-panet.yaml │ │ │ │ ├── yolov5l6.yaml │ │ │ │ ├── yolov5m6.yaml │ │ │ │ ├── yolov5n6.yaml │ │ │ │ ├── yolov5s-ghost.yaml │ │ │ │ ├── yolov5s-transformer.yaml │ │ │ │ ├── yolov5s6.yaml │ │ │ │ └── yolov5x6.yaml │ │ │ ├── tf.py │ │ │ ├── yolo.py │ │ │ ├── yolov5l.yaml │ │ │ ├── yolov5m.yaml │ │ │ ├── yolov5n.yaml │ │ │ ├── yolov5s.yaml │ │ │ └── yolov5x.yaml │ │ ├── requirements.txt │ │ └── utils/ │ │ ├── __init__.py │ │ ├── activations.py │ │ ├── augmentations.py │ │ ├── autoanchor.py │ │ ├── autobatch.py │ │ ├── aws/ │ │ │ ├── __init__.py │ │ │ ├── mime.sh │ │ │ ├── resume.py │ │ │ └── userdata.sh │ │ ├── benchmarks.py │ │ ├── callbacks.py │ │ ├── dataloaders.py │ │ ├── docker/ │ │ │ ├── Dockerfile │ │ │ ├── Dockerfile-arm64 │ │ │ └── Dockerfile-cpu │ │ ├── downloads.py │ │ ├── flask_rest_api/ │ │ │ ├── README.md │ │ │ ├── example_request.py │ │ │ └── restapi.py │ │ ├── general.py │ │ ├── google_app_engine/ │ │ │ ├── Dockerfile │ │ │ ├── additional_requirements.txt │ │ │ └── app.yaml │ │ ├── loggers/ │ │ │ ├── __init__.py │ │ │ └── wandb/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── log_dataset.py │ │ │ ├── sweep.py │ │ │ ├── sweep.yaml │ │ │ └── wandb_utils.py │ │ ├── loss.py │ │ ├── metrics.py │ │ ├── plots.py │ │ └── torch_utils.py │ ├── fuse/ │ │ ├── __init__.py │ │ ├── discriminator.py │ │ └── generator.py │ └── saliency/ │ ├── __init__.py │ └── u2net.py ├── pipeline/ │ ├── __init__.py │ ├── detect.py │ ├── fuse.py │ ├── iqa.py │ ├── saliency.py │ └── train.py ├── requirements.txt ├── scripts/ │ ├── __init__.py │ ├── infer_f.py │ ├── infer_fd.py │ ├── train_f.py │ ├── train_fd.py │ └── utils/ │ └── smart_optimizer.py ├── tools/ │ ├── choose_images.py │ ├── convert_to_png.py │ ├── data_preview.py │ ├── dict_to_device.py │ ├── environment_probe.py │ ├── generate_mask.py │ └── scenario_reader.py ├── train.py └── tutorial.ipynb ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/workflows/sync.yml ================================================ name: Mirror to DUT DIMT on: [ push, delete, create ] jobs: git-mirror: runs-on: ubuntu-latest steps: - name: Configure Private Key 
env: SSH_PRIVATE_KEY: ${{ secrets.PRIVATE_KEY }} run: | mkdir -p ~/.ssh echo "$SSH_PRIVATE_KEY" > ~/.ssh/id_rsa chmod 600 ~/.ssh/id_rsa echo "StrictHostKeyChecking no" >> ~/.ssh/config - name: Push Mirror env: SOURCE_REPO: 'https://github.com/JinyuanLiu-CV/TarDAL.git' DESTINATION_REPO: 'git@github.com:dlut-dimt/TarDAL.git' run: | git clone --mirror "$SOURCE_REPO" && cd `basename "$SOURCE_REPO"` git remote set-url --push origin "$DESTINATION_REPO" git fetch -p origin git for-each-ref --format 'delete %(refname)' refs/pull | git update-ref --stdin git push --mirror ================================================ FILE: .gitignore ================================================ # project config file (contain sensitive: server information) .idea/* # fuse results (contain images that can be reproduced by given model parameters) runs/* # macOS finder file (contain sensitive: local username) **/.DS_Store # python cache **/__pycache__ # experimental data data/* !data/README.md # weights (update by release) weights/* # test files **/test/* # wandb wandb/* ================================================ FILE: CITATION.cff ================================================ @inproceedings{liu2022target, title={Target-aware Dual Adversarial Learning and a Multi-scenario Multi-Modality Benchmark to Fuse Infrared and Visible for Object Detection}, author={Liu, Jinyuan and Fan, Xin and Huang, Zhanbo and Wu, Guanyao and Liu, Risheng and Zhong, Wei and Luo, Zhongxuan}, booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, pages={5802--5811}, year={2022} } ================================================ FILE: LICENSE ================================================ GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. 
Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. 
Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. 
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. 
"Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. 
Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. 
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see .

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see .

The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read .

================================================
FILE: README.md
================================================
# TarDAL

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JinyuanLiu-CV/TarDAL/blob/main/tutorial.ipynb) ![visitors](https://visitor-badge.glitch.me/badge?page_id=JinyuanLiu-CV.TarDAL)

Jinyuan Liu, Xin Fan*, Zhanbo Huang, Guanyao Wu, Risheng Liu, Wei Zhong, Zhongxuan Luo, **“Target-aware Dual Adversarial Learning and a Multi-scenario Multi-Modality Benchmark to Fuse Infrared and Visible for Object Detection”**, IEEE/CVF Conference on Computer Vision and Pattern Recognition **(CVPR)**, 2022. **(Oral)**

- [*[ArXiv]*](https://arxiv.org/abs/2203.16220v1)
- [*[CVPR]*](https://openaccess.thecvf.com/content/CVPR2022/papers/Liu_Target-Aware_Dual_Adversarial_Learning_and_a_Multi-Scenario_Multi-Modality_Benchmark_To_CVPR_2022_paper.pdf)

---

![Abstract](assets/first_figure.jpg)

---

## M3FD Dataset

### Preview

A preview of our dataset is shown below.

---

![preview](assets/preview.png)
![gif](assets/preview.gif)

---

### Details

- **Sensor**: A synchronized system containing one binocular optical camera and one binocular infrared sensor. More details are available in the paper.
- **Main scenes**:
  - Campus of Dalian University of Technology.
  - State Tourism Holiday Resort at the Golden Stone Beach in Dalian, China.
  - Main roads in Jinzhou District, Dalian, China.
- **Total number of images**:
  - **8400** (for fusion, detection, and fusion-based detection)
  - **600** (independent scenes for fusion)
- **Total number of image pairs**:
  - **4200** (for fusion, detection, and fusion-based detection)
  - **300** (independent scenes for fusion)
- **Format of images**:
  - [Infrared] 24-bit grayscale bitmap
  - [Visible] 24-bit color bitmap
- **Image size**: **1024 x 768** pixels (mostly)
- **Registration**: **All image pairs are registered.** The visible images are calibrated using the internal parameters of our synchronized system, and the infrared images are artificially distorted by a homography matrix.
- **Labeling**: **34407 labels** have been manually annotated, covering 6 kinds of targets: **{People, Car, Bus, Motorcycle, Lamp, Truck}**. (Limited by manpower, some targets may be mislabeled or missed. We would appreciate it if you would point out wrong or missing labels to help us improve the dataset.)

### Download

- [Google Drive](https://drive.google.com/drive/folders/1H-oO7bgRuVFYDcMGvxstT1nmy0WF_Y_6?usp=sharing)
- [Baidu Yun](https://pan.baidu.com/s/1GoJrrl_mn2HNQVDSUdPCrw?pwd=M3FD)

If you have any questions or suggestions about the dataset, please email [Guanyao Wu](mailto:rollingplainko@gmail.com) or [Jinyuan Liu](mailto:atlantis918@hotmail.com).
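If you want to sanity-check a downloaded pair and its annotations, the short sketch below (not part of the official codebase) loads one registered ir/vi pair together with its YOLO-format label file and converts the normalized boxes to pixel coordinates. It assumes the `ir/`, `vi/`, and `labels/` layout described in the Data Preparation section further down; the image name and the class order (taken from the list above) are illustrative and should be verified against your copy of the dataset.

```python
# Sanity-check sketch (not part of the official scripts): read one registered
# M3FD pair and its YOLO-format labels. Paths, the image name, and the class
# order are assumptions for illustration only.
from pathlib import Path

import cv2  # pip install opencv-python

CLASSES = ['People', 'Car', 'Bus', 'Motorcycle', 'Lamp', 'Truck']  # order assumed from the list above

root = Path('data/m3fd')
name = 'M3FD_00471'  # example id, as listed in assets/sample/s1/meta/pred.txt

ir = cv2.imread(str(root / 'ir' / f'{name}.png'), cv2.IMREAD_GRAYSCALE)
vi = cv2.imread(str(root / 'vi' / f'{name}.png'), cv2.IMREAD_COLOR)
assert ir is not None and vi is not None, 'image pair not found'
assert ir.shape[:2] == vi.shape[:2], 'pairs are registered, so sizes must match'

h, w = ir.shape[:2]
for line in (root / 'labels' / f'{name}.txt').read_text().splitlines():
    cls, cx, cy, bw, bh = (float(x) for x in line.split())
    # YOLO txt format: class cx cy w h, all normalized to [0, 1]
    x1, y1 = int((cx - bw / 2) * w), int((cy - bh / 2) * h)
    x2, y2 = int((cx + bw / 2) * w), int((cy + bh / 2) * h)
    print(CLASSES[int(cls)], (x1, y1, x2, y2))
```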

## TarDAL Fusion

### Baselines

In our experiments, we used the following **outstanding** works as baselines. *Note: sorted alphabetically.*

- [AUIF](https://ieeexplore.ieee.org/document/9416456) (IEEE TCSVT 2021)
- [DDcGAN](https://github.com/hanna-xu/DDcGAN) (IJCAI 2019)
- [Densefuse](https://github.com/hli1221/imagefusion_densefuse) (IEEE TIP 2019)
- [DIDFuse](https://github.com/Zhaozixiang1228/IVIF-DIDFuse) (IJCAI 2020)
- [FusionGAN](https://github.com/jiayi-ma/FusionGAN) (Information Fusion 2019)
- [GANMcC](https://github.com/HaoZhang1018/GANMcC) (IEEE TIM 2021)
- [MFEIF](https://github.com/JinyuanLiu-CV/MFEIF) (IEEE TCSVT 2021)
- [RFN-Nest](https://github.com/hli1221/imagefusion-rfn-nest) (Information Fusion 2021)
- [SDNet](https://github.com/HaoZhang1018/SDNet) (IJCV 2021)
- [U2Fusion](https://github.com/hanna-xu/U2Fusion) (IEEE TPAMI 2020)

### Quick Start

If you are just curious about the results of the fusion task, we have prepared an online demonstration: a free preview in [Colab](https://colab.research.google.com/github/JinyuanLiu-CV/TarDAL/blob/main/tutorial.ipynb).

### Set Up on Your Own Machine

When you want to dive deeper or apply TarDAL on a larger scale, you can configure it on your own machine by following the steps below.

#### Virtual Environment

We strongly recommend that you use Conda as a package manager.

```shell
# create virtual environment
conda create -n tardal python=3.10
conda activate tardal
# select pytorch version yourself
# install tardal requirements
pip install -r requirements.txt
# install yolov5 requirements
pip install -r module/detect/requirements.txt
```

#### Data Preparation

You should place the data in the following layout.

```
TarDAL ROOT
├── data
|   ├── m3fd
|   |   ├── ir      # infrared images
|   |   ├── vi      # visible images
|   |   ├── labels  # labels in txt format (yolo format)
|   |   └── meta    # meta data, includes: pred.txt, train.txt, val.txt
|   ├── tno
|   |   ├── ir      # infrared images
|   |   ├── vi      # visible images
|   |   └── meta    # meta data, includes: pred.txt, train.txt, val.txt
|   ├── roadscene
|   └── ...
```

You can directly download the TNO and RoadScene datasets organized in this format from here.

- [Google Drive](https://drive.google.com/drive/folders/1H-oO7bgRuVFYDcMGvxstT1nmy0WF_Y_6?usp=sharing)
- [Baidu Yun](https://pan.baidu.com/s/1GoJrrl_mn2HNQVDSUdPCrw?pwd=M3FD)

#### Fuse or Eval

In this section, we guide you through generating fusion images with our pre-trained models. As mentioned in the paper, we provide three pre-trained models.

| Name      | Description                                                      |
|-----------|------------------------------------------------------------------|
| TarDAL-DT | Optimized for human vision. (Default)                            |
| TarDAL-TT | Optimized for object detection.                                  |
| TarDAL-CT | Optimal solution for joint human vision and detection accuracy.  |

You can find their corresponding configuration files in [configs](config/official/infer). Settings to pay attention to:

* config.yaml
  * `strategy`: save images (fuse) or save images & labels (fuse & detect)
  * `dataset`: name & root
  * `inference`: each item under inference
* infer.py
  * `--cfg`: config file path, such as `config/official/infer/tardal-dt.yaml`
  * `--save_dir`: result save folder

Under normal circumstances, you don't need to download the model parameters manually; our program will do it for you.
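If you bring your own data, the only metadata the inference split needs is `meta/pred.txt`, which lists the image file names to fuse (see `assets/sample/s1/meta/pred.txt`). The sketch below is not part of the official scripts; it shows one way to generate that file from the `ir/` folder. The dataset root and the one-name-per-line format are assumptions based on the bundled sample. Afterwards, run one of the infer commands below.

```python
# Helper sketch (not an official TarDAL script): write meta/pred.txt for a
# custom dataset by listing every infrared image, one file name per line,
# mirroring assets/sample/s1/meta/pred.txt. The root path is illustrative.
from pathlib import Path

root = Path('data/m3fd')   # should match dataset.root in your config
ir_dir = root / 'ir'       # vi/ is expected to contain files with the same names
meta_dir = root / 'meta'
meta_dir.mkdir(parents=True, exist_ok=True)

names = sorted(p.name for p in ir_dir.iterdir()
               if p.suffix.lower() in {'.png', '.jpg', '.bmp'})
(meta_dir / 'pred.txt').write_text('\n'.join(names) + '\n')
print(f'wrote {len(names)} entries to {meta_dir / "pred.txt"}')
```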
```shell
# TarDAL-DT
# use official tardal-dt infer config and save images to runs/tardal-dt
python infer.py --cfg config/official/infer/tardal-dt.yaml --save_dir runs/tardal-dt
# TarDAL-TT
# use official tardal-tt infer config and save images to runs/tardal-tt
python infer.py --cfg config/official/infer/tardal-tt.yaml --save_dir runs/tardal-tt
# TarDAL-CT
# use official tardal-ct infer config and save images to runs/tardal-ct
python infer.py --cfg config/official/infer/tardal-ct.yaml --save_dir runs/tardal-ct
```

#### Train

We provide training scripts so that you can train your own model. Please note: the training code is only intended to assist in understanding the paper and is not recommended for direct use in production environments. Unlike previous code versions, you don't need to preprocess the data; we automatically calculate the IQA weights and masks.

```shell
# TarDAL-DT
python train.py --cfg config/official/train/tardal-dt.yaml --auth $YOUR_WANDB_KEY
# TarDAL-TT
python train.py --cfg config/official/train/tardal-tt.yaml --auth $YOUR_WANDB_KEY
# TarDAL-CT
python train.py --cfg config/official/train/tardal-ct.yaml --auth $YOUR_WANDB_KEY
```

If you want to build on our approach and extend it to a production environment, here are some additional suggestions.

[Suggestion: A better training process for everyone.](assets/train_process.png)

### Any Question

If you have any other questions about the code, please email [Zhanbo Huang](mailto:zbhuang917@hotmail.com). Due to a job change, the previous address `zbhuang@mail.dlut.edu.cn` is no longer available.

## Citation

If this work has been helpful to you, please feel free to cite our paper!

```
@inproceedings{liu2022target,
  title={Target-aware Dual Adversarial Learning and a Multi-scenario Multi-Modality Benchmark to Fuse Infrared and Visible for Object Detection},
  author={Liu, Jinyuan and Fan, Xin and Huang, Zhanbo and Wu, Guanyao and Liu, Risheng and Zhong, Wei and Luo, Zhongxuan},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={5802--5811},
  year={2022}
}
```

================================================
FILE: assets/sample/s1/meta/pred.txt
================================================
M3FD_00471.png
ROAD_040.jpg
TNO_028.bmp

================================================
FILE: config/__init__.py
================================================
class ConfigDict(dict):
    __setattr__ = dict.__setitem__
    __getattr__ = dict.__getitem__


def from_dict(obj) -> ConfigDict:
    if not isinstance(obj, dict):
        return obj
    d = ConfigDict()
    for k, v in obj.items():
        d[k] = from_dict(v)
    return d

================================================
FILE: config/default.yaml
================================================
# base settings
device : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...)
save_dir : 'cache' # folder used for saving the model, logs results # debug mode settings debug : log : INFO # log level wandb_mode: 'online' # wandb connection mode fast_run : false # use a small subset of the dataset for debugging code # framework training strategy: # backward method: fuse (direct training DT) # backward method: detect (task-oriented training TT) # backward method: fuse & detect (cooperative training CT) strategy : fuse & detect # fuse network settings: core of infrared and visible fusion fuse : dim : 32 # features base dimensions for generator and discriminator depth : 3 # depth of dense architecture pretrained: weights/v1/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters # detect network settings: available if framework in joint mode (detect, fuse + detect) detect : model : yolov5s # yolo model (yolov5 n,s,m,l,x) channels : 3 # input channels (3: rgb or 1: grayscale) pretrained: weights/v1/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters # saliency network settings: generating mask for training tardal saliency : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth # iqa settings: information measurement iqa : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth # dataset settings: # we provide four built-in representative datasets, # if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue. dataset : name : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.) root : data/m3fd # dataset root path # only available for fuse & detect detect: hsv : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing] degrees : 0 # image rotation (+/- degrees) [developing] translate : 0.1 # image translation (+/- fraction) [developing] scale : 0.9 # image scale (+/- gain) [developing] shear : 0.0 # image shear (+/- degrees) [developing] perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 [developing] flip_ud : 0.0 # image flip up-down (probability) flip_lr : 0.5 # image flip left-right (probability) # train settings: train : image_size : [ 320, 320 ] # training image size in (h, w) batch_size : 16 # batch size used to train num_workers : 8 # number of workers used in data loading epochs : 300 # number of epochs to train eval_interval: 1 # evaluation interval during training save_interval: 5 # save interval during training freeze : [ ] # freeze layers (e.g. backbone, head, ...) # inference settings: inference: batch_size : 8 # batch size used to train num_workers: 8 # number of workers used in data loading use_eval : true # use eval mode in inference mode, default true, false for v0 weights. 
grayscale : false # ignore dataset settings, save as grayscale image save_txt : false # save label file # loss settings: loss : # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det fuse : src_fn: v0 # v0: 0.01*ssim + 0.99*l1 | v1: ms-ssim src : 1 # src loss gain (v0: 0.8) adv : 0 # adv loss gain (v0: 0.2) t_adv : 1 # target loss gain (v0: 1) d_adv : 1 # detail loss gain (v0: 1) d_mask: false # use mask for detail discriminator (v0: true) d_warm: 1 # discriminator warmup epochs # detect loss: box + cls + obj detect: box : 0.05 # box loss gain cls : 0.3 # cls loss gain cls_pw : 1.0 # cls BCELoss positive weight obj : 0.7 # obj loss gain (scale with pixels) obj_pw : 1.0 # obj BCELoss positive weight iou_t : 0.20 # IoU training threshold anchor_t: 4.0 # anchor-multiple threshold fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) # bridge bridge: fuse : 0.5 # fuse loss gain for generator detect: 0.5 # detect loss gain for generator warm : 2 # bridge warm up epochs (det -> det, fuse -> fuse) # optimizer settings: optimizer: name : sgd # optimizer name lr_i : 1.0e-2 # initial learning rate lr_f : 1.0e-1 # final learning rate (lr_i * lr_f) momentum : 0.937 # adam beta1 weight_decay: 5.0e-4 # decay rate used in optimizer lr_d : 1.0e-4 # discriminator learning rate # scheduler settings: scheduler: warmup_epochs : [ 2.0, 3.0 ] # start-[0]: bridge warm (keep const), [0]-[1]: normal warm, [1]-end: normal decay warmup_momentum: 0.8 # warmup initial momentum warmup_bias_lr : 0.1 # warmup initial bias lr ================================================ FILE: config/exp/i-tardal-dt.yaml ================================================ # base settings device : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...) save_dir : 'cache' # folder used for saving the model, logs results # debug mode settings debug : wandb_mode: 'online' # wandb connection mode fast_run : false # use a small subset of the dataset for debugging code # framework training strategy: # backward method: fuse (direct training DT) # backward method: detect (task-oriented training TT) # backward method: fuse & detect (cooperative training CT) strategy : fuse & detect # fuse network settings: core of infrared and visible fusion fuse : dim : 32 # features base dimensions for generator and discriminator depth : 3 # depth of dense architecture pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters # detect network settings: available if framework in joint mode (detect, fuse + detect) detect : model : yolov5s # yolo model (yolov5 n,s,m,l,x) channels : 3 # input channels (3: rgb or 1: grayscale) pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters # saliency network settings: generating mask for training tardal saliency : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth # iqa settings: information measurement iqa : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth # dataset settings: # we provide four built-in representative datasets, # if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue. dataset : name : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.) 
root : data/m3fd # dataset root path # only available for fuse & detect detect: hsv : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing] degrees : 0 # image rotation (+/- degrees) [developing] translate : 0.1 # image translation (+/- fraction) [developing] scale : 0.9 # image scale (+/- gain) [developing] shear : 0.0 # image shear (+/- degrees) [developing] perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 [developing] flip_ud : 0.0 # image flip up-down (probability) flip_lr : 0.5 # image flip left-right (probability) # train settings: train : image_size : [ 224, 224 ] # training image size in (h, w) batch_size : 32 # batch size used to train num_workers : 8 # number of workers used in data loading epochs : 1000 # number of epochs to train eval_interval: 1 # evaluation interval during training save_interval: 5 # save interval during training # inference settings: inference: batch_size : 8 # batch size used to train num_workers: 8 # number of workers used in data loading use_eval : ~ # use eval mode in inference mode, default true, false for v0 weights. grayscale : false # ignore dataset settings, save as grayscale image save_txt : false # save label file # loss settings: loss : # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det fuse : src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim src : 0.8 # src loss gain (1 during v0) adv : 0.2 # adv loss gain (0.1 during v0) t_adv : 0 # target loss gain d_adv : 0 # detail loss gain d_mask: false # use mask for detail discriminator (v0: true) d_warm: 10 # discriminator warmup epochs # detect loss: box + cls + obj detect: box : 0.05 # box loss gain cls : 0.5 # cls loss gain cls_pw : 1.0 # cls BCELoss positive weight obj : 1.0 # obj loss gain (scale with pixels) obj_pw : 1.0 # obj BCELoss positive weight iou_t : 0.20 # IoU training threshold anchor_t: 4.0 # anchor-multiple threshold fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) # bridge bridge: fuse : 0.5 # fuse loss gain for generator detect: 0.5 # detect loss gain for generator # optimizer settings: optimizer: name : adamw # optimizer name lr_i : 1.0e-3 # initial learning rate lr_f : 1.0e-3 # final learning rate momentum : 0.937 # adam beta1 weight_decay: 5.0e-4 # decay rate used in optimizer # scheduler settings: scheduler: warmup_epochs : 3.0 # warmup epochs warmup_momentum: 0.8 # warmup initial momentum warmup_bias_lr : 0.1 # warmup initial bias lr ================================================ FILE: config/exp/t-tardal-ct.yaml ================================================ # base settings device : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...) 
save_dir : 'cache' # folder used for saving the model, logs results # debug mode settings debug : wandb_mode: 'online' # wandb connection mode fast_run : false # use a small subset of the dataset for debugging code # framework training strategy: # backward method: fuse (direct training DT) # backward method: detect (task-oriented training TT) # backward method: fuse & detect (cooperative training CT) strategy : fuse & detect # fuse network settings: core of infrared and visible fusion fuse : dim : 32 # features base dimensions for generator and discriminator depth : 3 # depth of dense architecture pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters # detect network settings: available if framework in joint mode (detect, fuse + detect) detect : model : yolov5s # yolo model (yolov5 n,s,m,l,x) channels : 3 # input channels (3: rgb or 1: grayscale) pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters # saliency network settings: generating mask for training tardal saliency : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth # iqa settings: information measurement iqa : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth # dataset settings: # we provide four built-in representative datasets, # if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue. dataset : name : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.) root : data/m3fd # dataset root path # only available for fuse & detect detect: hsv : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing] degrees : 0 # image rotation (+/- degrees) [developing] translate : 0.1 # image translation (+/- fraction) [developing] scale : 0.9 # image scale (+/- gain) [developing] shear : 0.0 # image shear (+/- degrees) [developing] perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 [developing] flip_ud : 0.0 # image flip up-down (probability) flip_lr : 0.5 # image flip left-right (probability) # train settings: train : image_size : [ 320, 320 ] # training image size in (h, w) batch_size : 16 # batch size used to train num_workers : 8 # number of workers used in data loading epochs : 1000 # number of epochs to train eval_interval: 1 # evaluation interval during training save_interval: 5 # save interval during training freeze : [ ] # freeze layers (e.g. backbone, head, ...) # inference settings: inference: batch_size : 8 # batch size used to train num_workers: 8 # number of workers used in data loading use_eval : True # use eval mode in inference mode, default true, false for v0 weights. 
grayscale : false # ignore dataset settings, save as grayscale image save_txt : false # save label file # loss settings: loss : # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det fuse : src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim src : 0.8 # src loss gain (1 during v0) adv : 0.2 # adv loss gain (0.1 during v0) t_adv : 0 # target loss gain d_adv : 0 # detail loss gain d_mask: false # use mask for detail discriminator (v0: true) d_warm: 10 # discriminator warmup epochs # detect loss: box + cls + obj detect: box : 0.05 # box loss gain cls : 0.5 # cls loss gain cls_pw : 1.0 # cls BCELoss positive weight obj : 1.0 # obj loss gain (scale with pixels) obj_pw : 1.0 # obj BCELoss positive weight iou_t : 0.20 # IoU training threshold anchor_t: 4.0 # anchor-multiple threshold fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) # bridge bridge: fuse : 0.5 # fuse loss gain for generator detect: 0.5 # detect loss gain for generator # optimizer settings: optimizer: name : adamw # optimizer name lr_i : 1.0e-3 # initial learning rate lr_f : 1.0e-3 # final learning rate momentum : 0.937 # adam beta1 weight_decay: 5.0e-4 # decay rate used in optimizer # scheduler settings: scheduler: warmup_epochs : 3.0 # warmup epochs warmup_momentum: 0.8 # warmup initial momentum warmup_bias_lr : 0.1 # warmup initial bias lr ================================================ FILE: config/official/colab.yaml ================================================ # base settings device : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...) save_dir : 'cache' # folder used for saving the model, logs results # debug mode settings debug : log : INFO # log level wandb_mode: 'offline' # wandb connection mode fast_run : false # use a small subset of the dataset for debugging code # framework training strategy: # backward method: fuse (direct training DT) # backward method: detect (task-oriented training TT) # backward method: fuse & detect (cooperative training CT) strategy : fuse # fuse network settings: core of infrared and visible fusion fuse : dim : 32 # features base dimensions for generator and discriminator depth : 3 # depth of dense architecture pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-ct.pth # ~: disable, path or url: load with pretrained parameters # detect network settings: available if framework in joint mode (detect, fuse + detect) detect : model : yolov5s # yolo model (yolov5 n,s,m,l,x) channels : 3 # input channels (3: rgb or 1: grayscale) pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-ct.pth # ~: disable, path or url: load with pretrained parameters # saliency network settings: generating mask for training tardal saliency : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth # iqa settings: information measurement iqa : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth # dataset settings: # we provide four built-in representative datasets, # if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue. dataset : name : roadscene # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.) 
root : assets/sample/s1 # dataset root path # only available for fuse & detect detect: hsv : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing] degrees : 0 # image rotation (+/- degrees) [developing] translate : 0.1 # image translation (+/- fraction) [developing] scale : 0.9 # image scale (+/- gain) [developing] shear : 0.0 # image shear (+/- degrees) [developing] perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 [developing] flip_ud : 0.0 # image flip up-down (probability) flip_lr : 0.5 # image flip left-right (probability) # train settings: train : image_size : [ 224, 224 ] # training image size in (h, w) batch_size : 32 # batch size used to train num_workers : 8 # number of workers used in data loading epochs : 1000 # number of epochs to train eval_interval: 1 # evaluation interval during training save_interval: 5 # save interval during training # inference settings: inference: batch_size : 8 # batch size used to train num_workers: 8 # number of workers used in data loading use_eval : ~ # use eval mode in inference mode, default true, false for v0 weights. grayscale : false # ignore dataset settings, save as grayscale image save_txt : false # save label file # loss settings: loss : # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det fuse : src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim src : 0.8 # src loss gain (1 during v0) adv : 0.2 # adv loss gain (0.1 during v0) t_adv : 0 # target loss gain d_adv : 0 # detail loss gain d_mask: false # use mask for detail discriminator (v0: true) d_warm: 10 # discriminator warmup epochs # detect loss: box + cls + obj detect: box : 0.05 # box loss gain cls : 0.5 # cls loss gain cls_pw : 1.0 # cls BCELoss positive weight obj : 1.0 # obj loss gain (scale with pixels) obj_pw : 1.0 # obj BCELoss positive weight iou_t : 0.20 # IoU training threshold anchor_t: 4.0 # anchor-multiple threshold fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) # bridge bridge: fuse : 0.5 # fuse loss gain for generator detect: 0.5 # detect loss gain for generator # optimizer settings: optimizer: name : adamw # optimizer name lr_i : 1.0e-3 # initial learning rate lr_f : 1.0e-3 # final learning rate momentum : 0.937 # adam beta1 weight_decay: 5.0e-4 # decay rate used in optimizer # scheduler settings: scheduler: warmup_epochs : 3.0 # warmup epochs warmup_momentum: 0.8 # warmup initial momentum warmup_bias_lr : 0.1 # warmup initial bias lr ================================================ FILE: config/official/infer/tardal-ct.yaml ================================================ # base settings device : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...) 
save_dir : 'cache' # folder used for saving the model, logs results # debug mode settings debug : log : INFO # log level wandb_mode: 'online' # wandb connection mode fast_run : false # use a small subset of the dataset for debugging code # framework training strategy: # backward method: fuse (direct training DT) # backward method: detect (task-oriented training TT) # backward method: fuse & detect (cooperative training CT) strategy : fuse # fuse network settings: core of infrared and visible fusion fuse : dim : 32 # features base dimensions for generator and discriminator depth : 3 # depth of dense architecture pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-ct.pth # ~: disable, path or url: load with pretrained parameters # detect network settings: available if framework in joint mode (detect, fuse + detect) detect : model : yolov5s # yolo model (yolov5 n,s,m,l,x) channels : 3 # input channels (3: rgb or 1: grayscale) pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-ct.pth # ~: disable, path or url: load with pretrained parameters # saliency network settings: generating mask for training tardal saliency : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth # iqa settings: information measurement iqa : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth # dataset settings: # we provide four built-in representative datasets, # if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue. dataset : name : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.) root : data/m3fd # dataset root path # only available for fuse & detect detect: hsv : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing] degrees : 0 # image rotation (+/- degrees) [developing] translate : 0.1 # image translation (+/- fraction) [developing] scale : 0.9 # image scale (+/- gain) [developing] shear : 0.0 # image shear (+/- degrees) [developing] perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 [developing] flip_ud : 0.0 # image flip up-down (probability) flip_lr : 0.5 # image flip left-right (probability) # train settings: train : image_size : [ 224, 224 ] # training image size in (h, w) batch_size : 32 # batch size used to train num_workers : 8 # number of workers used in data loading epochs : 1000 # number of epochs to train eval_interval: 1 # evaluation interval during training save_interval: 5 # save interval during training # inference settings: inference: batch_size : 8 # batch size used to train num_workers: 8 # number of workers used in data loading use_eval : ~ # use eval mode in inference mode, default true, false for v0 weights. 
grayscale : false # ignore dataset settings, save as grayscale image save_txt : false # save label file # loss settings: loss : # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det fuse : src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim src : 0.8 # src loss gain (1 during v0) adv : 0.2 # adv loss gain (0.1 during v0) t_adv : 0 # target loss gain d_adv : 0 # detail loss gain d_mask: false # use mask for detail discriminator (v0: true) d_warm: 10 # discriminator warmup epochs # detect loss: box + cls + obj detect: box : 0.05 # box loss gain cls : 0.5 # cls loss gain cls_pw : 1.0 # cls BCELoss positive weight obj : 1.0 # obj loss gain (scale with pixels) obj_pw : 1.0 # obj BCELoss positive weight iou_t : 0.20 # IoU training threshold anchor_t: 4.0 # anchor-multiple threshold fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) # bridge bridge: fuse : 0.5 # fuse loss gain for generator detect: 0.5 # detect loss gain for generator # optimizer settings: optimizer: name : adamw # optimizer name lr_i : 1.0e-3 # initial learning rate lr_f : 1.0e-3 # final learning rate momentum : 0.937 # adam beta1 weight_decay: 5.0e-4 # decay rate used in optimizer # scheduler settings: scheduler: warmup_epochs : 3.0 # warmup epochs warmup_momentum: 0.8 # warmup initial momentum warmup_bias_lr : 0.1 # warmup initial bias lr ================================================ FILE: config/official/infer/tardal-dt.yaml ================================================ # base settings device : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...) save_dir : 'cache' # folder used for saving the model, logs results # debug mode settings debug : log : INFO # log level wandb_mode: 'online' # wandb connection mode fast_run : false # use a small subset of the dataset for debugging code # framework training strategy: # backward method: fuse (direct training DT) # backward method: detect (task-oriented training TT) # backward method: fuse & detect (cooperative training CT) strategy : fuse # fuse network settings: core of infrared and visible fusion fuse : dim : 32 # features base dimensions for generator and discriminator depth : 3 # depth of dense architecture pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters # detect network settings: available if framework in joint mode (detect, fuse + detect) detect : model : yolov5s # yolo model (yolov5 n,s,m,l,x) channels : 3 # input channels (3: rgb or 1: grayscale) pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters # saliency network settings: generating mask for training tardal saliency : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth # iqa settings: information measurement iqa : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth # dataset settings: # we provide four built-in representative datasets, # if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue. dataset : name : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.) 
root : data/m3fd # dataset root path # only available for fuse & detect detect: hsv : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing] degrees : 0 # image rotation (+/- degrees) [developing] translate : 0.1 # image translation (+/- fraction) [developing] scale : 0.9 # image scale (+/- gain) [developing] shear : 0.0 # image shear (+/- degrees) [developing] perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 [developing] flip_ud : 0.0 # image flip up-down (probability) flip_lr : 0.5 # image flip left-right (probability) # train settings: train : image_size : [ 224, 224 ] # training image size in (h, w) batch_size : 32 # batch size used to train num_workers : 8 # number of workers used in data loading epochs : 1000 # number of epochs to train eval_interval: 1 # evaluation interval during training save_interval: 5 # save interval during training # inference settings: inference: batch_size : 8 # batch size used to train num_workers: 8 # number of workers used in data loading use_eval : ~ # use eval mode in inference mode, default true, false for v0 weights. grayscale : false # ignore dataset settings, save as grayscale image save_txt : false # save label file # loss settings: loss : # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det fuse : src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim src : 0.8 # src loss gain (1 during v0) adv : 0.2 # adv loss gain (0.1 during v0) t_adv : 0 # target loss gain d_adv : 0 # detail loss gain d_mask: false # use mask for detail discriminator (v0: true) d_warm: 10 # discriminator warmup epochs # detect loss: box + cls + obj detect: box : 0.05 # box loss gain cls : 0.5 # cls loss gain cls_pw : 1.0 # cls BCELoss positive weight obj : 1.0 # obj loss gain (scale with pixels) obj_pw : 1.0 # obj BCELoss positive weight iou_t : 0.20 # IoU training threshold anchor_t: 4.0 # anchor-multiple threshold fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) # bridge bridge: fuse : 0.5 # fuse loss gain for generator detect: 0.5 # detect loss gain for generator # optimizer settings: optimizer: name : adamw # optimizer name lr_i : 1.0e-3 # initial learning rate lr_f : 1.0e-3 # final learning rate momentum : 0.937 # adam beta1 weight_decay: 5.0e-4 # decay rate used in optimizer # scheduler settings: scheduler: warmup_epochs : 3.0 # warmup epochs warmup_momentum: 0.8 # warmup initial momentum warmup_bias_lr : 0.1 # warmup initial bias lr ================================================ FILE: config/official/infer/tardal-tt.yaml ================================================ # base settings device : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...) 
save_dir : 'cache' # folder used for saving the model, logs results # debug mode settings debug : log : INFO # log level wandb_mode: 'online' # wandb connection mode fast_run : false # use a small subset of the dataset for debugging code # framework training strategy: # backward method: fuse (direct training DT) # backward method: detect (task-oriented training TT) # backward method: fuse & detect (cooperative training CT) strategy : fuse # fuse network settings: core of infrared and visible fusion fuse : dim : 32 # features base dimensions for generator and discriminator depth : 3 # depth of dense architecture pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-tt.pth # ~: disable, path or url: load with pretrained parameters # detect network settings: available if framework in joint mode (detect, fuse + detect) detect : model : yolov5s # yolo model (yolov5 n,s,m,l,x) channels : 3 # input channels (3: rgb or 1: grayscale) pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-tt.pth # ~: disable, path or url: load with pretrained parameters # saliency network settings: generating mask for training tardal saliency : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth # iqa settings: information measurement iqa : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth # dataset settings: # we provide four built-in representative datasets, # if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue. dataset : name : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.) root : data/m3fd # dataset root path # only available for fuse & detect detect: hsv : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing] degrees : 0 # image rotation (+/- degrees) [developing] translate : 0.1 # image translation (+/- fraction) [developing] scale : 0.9 # image scale (+/- gain) [developing] shear : 0.0 # image shear (+/- degrees) [developing] perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 [developing] flip_ud : 0.0 # image flip up-down (probability) flip_lr : 0.5 # image flip left-right (probability) # train settings: train : image_size : [ 224, 224 ] # training image size in (h, w) batch_size : 32 # batch size used to train num_workers : 12 # number of workers used in data loading epochs : 1000 # number of epochs to train eval_interval: 5 # evaluation interval during training save_interval: 5 # save interval during training # inference settings: inference: batch_size : 8 # batch size used to train num_workers: 12 # number of workers used in data loading use_eval : true # use eval mode in inference mode, default true, false for v0 weights. 
grayscale : false # ignore dataset settings, save as grayscale image # loss settings: loss : # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det fuse : src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim src : 0.8 # src loss gain (1 during v0) adv : 0.2 # adv loss gain (0.1 during v0) t_adv : 0.5 # target loss gain d_adv : 0.5 # detail loss gain det : 1.0 # det loss gain (available only for detect or fuse+detect mode) d_mask: false # use mask for detail discriminator (v0: true) d_warm: 10 # discriminator warmup epochs # detect loss: box + cls + obj detect: box : 0.05 # box loss gain cls : 0.5 # cls loss gain cls_pw : 1.0 # cls BCELoss positive weight obj : 1.0 # obj loss gain (scale with pixels) obj_pw : 1.0 # obj BCELoss positive weight iou_t : 0.20 # IoU training threshold anchor_t: 4.0 # anchor-multiple threshold fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) # bridge bridge: fuse : 0.5 # fuse loss gain for generator detect: 0.5 # detect loss gain for generator # optimizer settings: optimizer: name : adamw # optimizer name lr_i : 1.0e-3 # initial learning rate lr_f : 1.0e-3 # final learning rate momentum : 0.937 # adam beta1 weight_decay: 5.0e-4 # decay rate used in optimizer # scheduler settings: scheduler: warmup_epochs : 3.0 # warmup epochs warmup_momentum: 0.8 # warmup initial momentum warmup_bias_lr : 0.1 # warmup initial bias lr ================================================ FILE: config/official/train/tardal-ct.yaml ================================================ # base settings device : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...) save_dir : 'cache' # folder used for saving the model, logs results # debug mode settings debug : log : INFO # log level wandb_mode: 'online' # wandb connection mode fast_run : false # use a small subset of the dataset for debugging code # framework training strategy: # backward method: fuse (direct training DT) # backward method: detect (task-oriented training TT) # backward method: fuse & detect (cooperative training CT) strategy : fuse & detect # fuse network settings: core of infrared and visible fusion fuse : dim : 32 # features base dimensions for generator and discriminator depth : 3 # depth of dense architecture pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters # detect network settings: available if framework in joint mode (detect, fuse + detect) detect : model : yolov5s # yolo model (yolov5 n,s,m,l,x) channels : 3 # input channels (3: rgb or 1: grayscale) pretrained: ~ # ~: disable, path or url: load with pretrained parameters # saliency network settings: generating mask for training tardal saliency : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth # iqa settings: information measurement iqa : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth # dataset settings: # we provide four built-in representative datasets, # if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue. dataset : name : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.) 
root : data/m3fd # dataset root path # only available for fuse & detect detect: hsv : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing] degrees : 0 # image rotation (+/- degrees) [developing] translate : 0.1 # image translation (+/- fraction) [developing] scale : 0.9 # image scale (+/- gain) [developing] shear : 0.0 # image shear (+/- degrees) [developing] perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 [developing] flip_ud : 0.0 # image flip up-down (probability) flip_lr : 0.5 # image flip left-right (probability) # train settings: train : image_size : [ 320, 320 ] # training image size in (h, w) batch_size : 16 # batch size used to train num_workers : 8 # number of workers used in data loading epochs : 300 # number of epochs to train eval_interval: 1 # evaluation interval during training save_interval: 5 # save interval during training freeze : [ ] # freeze layers (e.g. backbone, head, ...) # inference settings: inference: batch_size : 8 # batch size used to train num_workers: 8 # number of workers used in data loading use_eval : true # use eval mode in inference mode, default true, false for v0 weights. grayscale : false # ignore dataset settings, save as grayscale image save_txt : false # save label file # loss settings: loss : # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det fuse : src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim src : 0.8 # src loss gain (1 during v0) adv : 0.2 # adv loss gain (0.1 during v0) t_adv : 0.5 # target loss gain d_adv : 0.5 # detail loss gain det : 1.0 # det loss gain (available only for detect or fuse+detect mode) d_mask: false # use mask for detail discriminator (v0: true) d_warm: 10 # discriminator warmup epochs # detect loss: box + cls + obj detect: box : 0.05 # box loss gain cls : 0.3 # cls loss gain cls_pw : 1.0 # cls BCELoss positive weight obj : 0.7 # obj loss gain (scale with pixels) obj_pw : 1.0 # obj BCELoss positive weight iou_t : 0.20 # IoU training threshold anchor_t: 4.0 # anchor-multiple threshold fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) # bridge bridge: fuse : 0.5 # fuse loss gain for generator detect: 0.5 # detect loss gain for generator warm : 2 # bridge warm up epochs (det -> det, fuse -> fuse) # optimizer settings: optimizer: name : sgd # optimizer name lr_i : 1.0e-2 # initial learning rate lr_f : 1.0e-1 # final learning rate (lr_i * lr_f) momentum : 0.937 # adam beta1 weight_decay: 5.0e-4 # decay rate used in optimizer lr_d : 1.0e-4 # discriminator learning rate # scheduler settings: scheduler: warmup_epochs : [ 2.0, 3.0 ] # start-[0]: bridge warm (keep const), [0]-[1]: normal warm, [1]-end: normal decay warmup_momentum: 0.8 # warmup initial momentum warmup_bias_lr : 0.1 # warmup initial bias lr ================================================ FILE: config/official/train/tardal-dt.yaml ================================================ # base settings device : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...) 
save_dir : 'cache' # folder used for saving the model, logs results # debug mode settings debug : log : INFO # log level wandb_mode: 'online' # wandb connection mode fast_run : false # use a small subset of the dataset for debugging code # framework training strategy: # backward method: fuse (direct training DT) # backward method: detect (task-oriented training TT) # backward method: fuse & detect (cooperative training CT) strategy : fuse # fuse network settings: core of infrared and visible fusion fuse : dim : 32 # features base dimensions for generator and discriminator depth : 3 # depth of dense architecture pretrained: ~ # ~: disable, path or url: load with pretrained parameters # detect network settings: available if framework in joint mode (detect, fuse + detect) detect : model : yolov5s # yolo model (yolov5 n,s,m,l,x) channels : 3 # input channels (3: rgb or 1: grayscale) pretrained: ~ # ~: disable, path or url: load with pretrained parameters # saliency network settings: generating mask for training tardal saliency : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth # iqa settings: information measurement iqa : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth # dataset settings: # we provide four built-in representative datasets, # if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue. dataset : name : RoadScene # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.) root : data/roadscene # dataset root path # only available for fuse & detect detect: hsv : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing] degrees : 0 # image rotation (+/- degrees) [developing] translate : 0.1 # image translation (+/- fraction) [developing] scale : 0.9 # image scale (+/- gain) [developing] shear : 0.0 # image shear (+/- degrees) [developing] perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 [developing] flip_ud : 0.0 # image flip up-down (probability) flip_lr : 0.5 # image flip left-right (probability) # train settings: train : image_size : [ 224, 224 ] # training image size in (h, w) batch_size : 32 # batch size used to train num_workers : 12 # number of workers used in data loading epochs : 1000 # number of epochs to train eval_interval: 5 # evaluation interval during training save_interval: 5 # save interval during training freeze : [ ] # freeze layers (e.g. backbone, head, ...) # inference settings: inference: batch_size : 8 # batch size used to train num_workers: 12 # number of workers used in data loading use_eval : true # use eval mode in inference mode, default true, false for v0 weights. 
grayscale : false # ignore dataset settings, save as grayscale image # loss settings: loss : # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det fuse : src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim src : 0.8 # src loss gain (1 during v0) adv : 0.2 # adv loss gain (0.1 during v0) t_adv : 0.5 # target loss gain d_adv : 0.5 # detail loss gain det : 1.0 # det loss gain (available only for detect or fuse+detect mode) d_mask: false # use mask for detail discriminator (v0: true) d_warm: 10 # discriminator warmup epochs # detect loss: box + cls + obj detect: box : 0.05 # box loss gain cls : 0.5 # cls loss gain cls_pw : 1.0 # cls BCELoss positive weight obj : 1.0 # obj loss gain (scale with pixels) obj_pw : 1.0 # obj BCELoss positive weight iou_t : 0.20 # IoU training threshold anchor_t: 4.0 # anchor-multiple threshold fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) # bridge bridge: fuse : 0.5 # fuse loss gain for generator detect: 0.5 # detect loss gain for generator # optimizer settings: optimizer: name : adamw # optimizer name lr_i : 1.0e-3 # initial learning rate lr_f : 1.0e-3 # final learning rate momentum : 0.937 # adam beta1 weight_decay: 5.0e-4 # decay rate used in optimizer # scheduler settings: scheduler: warmup_epochs : 3.0 # warmup epochs warmup_momentum: 0.8 # warmup initial momentum warmup_bias_lr : 0.1 # warmup initial bias lr ================================================ FILE: config/official/train/tardal-tt.yaml ================================================ # base settings device : cuda # device used for training and evaluation (cpu, cuda, cuda0, cuda1, ...) save_dir : 'cache' # folder used for saving the model, logs results # debug mode settings debug : log : INFO # log level wandb_mode: 'online' # wandb connection mode fast_run : false # use a small subset of the dataset for debugging code # framework training strategy: # backward method: fuse (direct training DT) # backward method: detect (task-oriented training TT) # backward method: fuse & detect (cooperative training CT) strategy : detect # fuse network settings: core of infrared and visible fusion fuse : dim : 32 # features base dimensions for generator and discriminator depth : 3 # depth of dense architecture pretrained: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth # ~: disable, path or url: load with pretrained parameters # detect network settings: available if framework in joint mode (detect, fuse + detect) detect : model : yolov5s # yolo model (yolov5 n,s,m,l,x) channels : 3 # input channels (3: rgb or 1: grayscale) pretrained: ~ # ~: disable, path or url: load with pretrained parameters # saliency network settings: generating mask for training tardal saliency : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/mask-u2.pth # iqa settings: information measurement iqa : url: https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/iqa-vgg.pth # dataset settings: # we provide four built-in representative datasets, # if you want to use some custom datasets, please refer to the documentation to write yourself or open an issue. dataset : name : M3FD # dataset folder to be trained with (fuse: TNO, RoadScene; fuse & detect: M3FD, MultiSpectral, etc.) 
root : data/m3fd # dataset root path # only available for fuse & detect detect: hsv : [ 0.015,0.7,0.4 ] # image HSV augmentation (fraction) [developing] degrees : 0 # image rotation (+/- degrees) [developing] translate : 0.1 # image translation (+/- fraction) [developing] scale : 0.9 # image scale (+/- gain) [developing] shear : 0.0 # image shear (+/- degrees) [developing] perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 [developing] flip_ud : 0.0 # image flip up-down (probability) flip_lr : 0.5 # image flip left-right (probability) # train settings: train : image_size : [ 320, 320 ] # training image size in (h, w) batch_size : 16 # batch size used to train num_workers : 8 # number of workers used in data loading epochs : 300 # number of epochs to train eval_interval: 1 # evaluation interval during training save_interval: 5 # save interval during training freeze : [ ] # freeze layers (e.g. backbone, head, ...) # inference settings: inference: batch_size : 8 # batch size used to train num_workers: 8 # number of workers used in data loading use_eval : true # use eval mode in inference mode, default true, false for v0 weights. grayscale : false # ignore dataset settings, save as grayscale image # loss settings: loss : # fuse loss: src(l1+ssim/ms-ssim) + adv(target+detail) + det fuse : src_fn: v1 # v0: 1*ssim + 20*l1 | v1: ms-ssim src : 0.8 # src loss gain (1 during v0) adv : 0.2 # adv loss gain (0.1 during v0) t_adv : 0.5 # target loss gain d_adv : 0.5 # detail loss gain det : 1.0 # det loss gain (available only for detect or fuse+detect mode) d_mask: false # use mask for detail discriminator (v0: true) d_warm: 10 # discriminator warmup epochs # detect loss: box + cls + obj detect: box : 0.05 # box loss gain cls : 0.3 # cls loss gain cls_pw : 1.0 # cls BCELoss positive weight obj : 0.7 # obj loss gain (scale with pixels) obj_pw : 1.0 # obj BCELoss positive weight iou_t : 0.20 # IoU training threshold anchor_t: 4.0 # anchor-multiple threshold fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) # bridge bridge: fuse : 0.5 # fuse loss gain for generator detect: 0.5 # detect loss gain for generator warm : 2 # bridge warm up epochs (det -> det, fuse -> fuse) # optimizer settings: optimizer: name : sgd # optimizer name lr_i : 1.0e-2 # initial learning rate lr_f : 1.0e-1 # final learning rate (lr_i * lr_f) momentum : 0.937 # adam beta1 weight_decay: 5.0e-4 # decay rate used in optimizer lr_d : 1.0e-4 # discriminator learning rate # scheduler settings: scheduler: warmup_epochs : [ 2.0, 3.0 ] # start-[0]: bridge warm (keep const), [0]-[1]: normal warm, [1]-end: normal decay warmup_momentum: 0.8 # warmup initial momentum warmup_bias_lr : 0.1 # warmup initial bias lr ================================================ FILE: data/README.md ================================================ # Dataset Configure Reference ## Official Supported Datasets * TNO: fuse * RoadScene: fuse * MultiSpectral: fuse + detect * M3FD: fuse + detect ## Other Datasets You can write scripts for your own custom dataset in `loader/{$NAME}.py`, and raise a pull request (optional). 
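If you do write one, a minimal fuse-only loader modeled on `loader/tno.py` is sketched below; the class name `Custom` and the `ir/`, `vi/`, `meta/{mode}.txt` layout are placeholders for your own data, and the new class must also be exported from `loader/__init__.py`.

```python
# loader/custom.py -- minimal fuse-only loader sketch, modeled on loader/tno.py.
# 'Custom' and the folder layout are placeholders; adapt them to your dataset.
from pathlib import Path
from typing import List, Literal

import torch
from kornia.geometry import resize
from torch import Size, Tensor
from torch.utils.data import Dataset
from torchvision.transforms import Resize

from config import ConfigDict
from loader.utils.checker import check_image, check_iqa, check_mask, get_max_size
from loader.utils.reader import gray_read, img_write


class Custom(Dataset):
    type = 'fuse'   # 'fuse' or 'fuse & detect'
    color = False   # false: grayscale visible images, true: color

    def __init__(self, root: str | Path, mode: Literal['train', 'val', 'pred'], config: ConfigDict):
        super().__init__()
        self.root, self.mode = Path(root), mode
        # image names are listed one per line in meta/{mode}.txt
        self.img_list = (self.root / 'meta' / f'{mode}.txt').read_text().splitlines()
        check_image(self.root, self.img_list)
        if mode in ('train', 'val'):
            check_mask(self.root, self.img_list, config)  # build saliency mask cache if missing
            check_iqa(self.root, self.img_list, config)   # build information measurement cache if missing
            self.transform_fn = Resize(size=config.train.image_size)
        else:
            self.transform_fn = Resize(size=get_max_size(self.root, self.img_list))

    def __len__(self) -> int:
        return len(self.img_list)

    def __getitem__(self, index: int) -> dict:
        name = self.img_list[index]
        ir = gray_read(self.root / 'ir' / name)
        vi = gray_read(self.root / 'vi' / name)
        if self.mode in ('train', 'val'):
            mask = gray_read(self.root / 'mask' / name)
            ir_w = gray_read(self.root / 'iqa' / 'ir' / name)
            vi_w = gray_read(self.root / 'iqa' / 'vi' / name)
            t = self.transform_fn(torch.cat([ir, vi, mask, ir_w, vi_w], dim=0))
            ir, vi, mask, ir_w, vi_w = torch.chunk(t, chunks=5, dim=0)
            return {'name': name, 'ir': ir, 'vi': vi, 'ir_w': ir_w, 'vi_w': vi_w, 'mask': mask}
        shape = ir.shape[1:]  # original size, restored later by pred_save
        ir, vi = torch.chunk(self.transform_fn(torch.cat([ir, vi], dim=0)), chunks=2, dim=0)
        return {'name': name, 'ir': ir, 'vi': vi, 'shape': shape}

    @staticmethod
    def pred_save(fus: Tensor, names: List[str | Path], shape: List[Size]):
        for img_t, img_p, img_s in zip(fus, names, shape):
            img_write(resize(img_t, img_s), img_p)

    @staticmethod
    def collate_fn(data: List[dict]) -> dict:
        # keep names/shapes as lists, stack image tensors into a batch
        return {
            key: [d[key] for d in data]
            if isinstance(data[0][key], (str, Size))
            else torch.stack([d[key] for d in data])
            for key in data[0].keys()
        }
```

A `fuse & detect` loader additionally validates a `labels/` folder and handles bounding boxes in `pred_save` and `collate_fn`; see `loader/m3fd.py` for the full pattern.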
## Prepare Datasets should have the following structure: ``` data |__ TNO // name of the dataset |__ ir // infrared images |__ vi // visible images |__ meta // dataset meta information |__ train.txt // image name for training |__ val.txt // image name for validation |__ M3FD // name of the dataset |__ ir // infrared images |__ vi // visible images |__ labels // object labels (ground truth, cxcywh) |__ meta // dataset meta information |__ train.txt // image name for training |__ val.txt // image name for validation ``` ================================================ FILE: functions/__init__.py ================================================ ================================================ FILE: functions/div_loss.py ================================================ import logging import torch import torch.autograd as autograd def div_loss(disc, real_x, fake_x, wp: int = 6, eps: float = 1e-6): logging.debug(f'calculating div: real {real_x.mean():.2f}, fake {fake_x.mean():.2f}') alpha = torch.rand((real_x.shape[0], 1, 1, 1)).cuda() tmp_x = (alpha * real_x + (1 - alpha) * fake_x).requires_grad_(True) tmp_y = disc(tmp_x) grad = autograd.grad( outputs=tmp_y, inputs=tmp_x, grad_outputs=torch.ones_like(tmp_y), create_graph=True, retain_graph=True, only_inputs=True, )[0] grad = grad.view(tmp_x.shape[0], -1) + eps div = (grad.norm(2, dim=1) ** wp).mean() return div ================================================ FILE: functions/get_param_groups.py ================================================ from typing import List from torch import nn def get_param_groups(module) -> tuple[List, List, List]: group = [], [], [] bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers for v in module.modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): "bias" group[2].append(v.bias) if isinstance(v, bn): "weight (no decay)" group[1].append(v.weight) elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): "weight (with decay)" group[0].append(v.weight) return group ================================================ FILE: infer.py ================================================ import argparse import logging from pathlib import Path import torch.backends.cudnn import yaml import scripts from config import from_dict if __name__ == '__main__': # args parser parser = argparse.ArgumentParser() parser.add_argument('--cfg', default='config/default.yaml', help='config file path') parser.add_argument('--save_dir', default='runs/tmp', help='fusion result save folder') args = parser.parse_args() # init config config = yaml.safe_load(Path(args.cfg).open('r')) config = from_dict(config) # convert dict to object config = config # init logger log_f = '%(asctime)s | %(filename)s[line:%(lineno)d] | %(levelname)s | %(message)s' logging.basicConfig(level=config.debug.log, format=log_f) # init device & anomaly detector torch.backends.cudnn.benchmark = True torch.autograd.set_detect_anomaly(True) # choose inference script logging.info(f'enter {config.strategy} inference mode') match config.strategy: case 'fuse': infer_p = getattr(scripts, 'InferF') # check pretrained weights if config.fuse.pretrained is None: logging.warning('no pretrained weights specified, use official pretrained weights') config.fuse.pretrained = 'https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-dt.pth' case 'fuse & detect': infer_p = getattr(scripts, 'InferFD') # check pretrained weights if config.fuse.pretrained is None: logging.warning('no pretrained weights specified, use official 
pretrained weights') config.fuse.pretrained = 'https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/tardal-ct.pth' case 'detect': raise NotImplementedError('detect mode is useless during inference period, please use fuse & detect mode') case _: raise ValueError(f'unknown strategy: {config.strategy}') # create script instance infer = infer_p(config, args.save_dir) infer.run() ================================================ FILE: loader/__init__.py ================================================ from loader.m3fd import M3FD from loader.roadscene import RoadScene from loader.tno import TNO __all__ = ['TNO', 'RoadScene', 'M3FD'] ================================================ FILE: loader/m3fd.py ================================================ import logging import random from pathlib import Path from typing import Literal, List, Optional import torch from kornia.geometry import vflip, hflip, resize from torch import Tensor, Size from torch.utils.data import Dataset from torchvision.ops import box_convert from torchvision.transforms import Resize from torchvision.utils import draw_bounding_boxes from config import ConfigDict from loader.utils.checker import check_mask, check_image, check_labels, check_iqa, get_max_size from loader.utils.reader import gray_read, ycbcr_read, label_read, img_write, label_write from tools.scenario_reader import scenario_counter, generate_meta class M3FD(Dataset): type = 'fuse & detect' # dataset type: 'fuse' or 'fuse & detect' color = True # dataset visible format: false -> 'gray' or true -> 'color' classes = ['People', 'Car', 'Bus', 'Lamp', 'Motorcycle', 'Truck'] palette = ['#FF0000', '#C1C337', '#2FA7B4', '#F541C4', '#F84F2C', '#7D2CC8'] generate_meta_lock = False # generate meta once def __init__(self, root: str | Path, mode: Literal['train', 'val', 'pred'], config: ConfigDict): super().__init__() root = Path(root) self.root = root self.mode = mode self.config = config # check json meta config if M3FD.generate_meta_lock is False: if Path(root / 'meta' / 'scenario.json').exists(): logging.info('found scenario.json, generating train & val list.') scenario_counter(root / 'meta' / 'scenario.json') generate_meta(root) M3FD.generate_meta_lock = True else: logging.warning('not found scenario.json, using current train & val list.') # read corresponding list img_list = Path(root / 'meta' / f'{mode}.txt').read_text().splitlines() logging.info(f'load {len(img_list)} images from {root.name}') self.img_list = img_list # check images check_image(root, img_list) # check labels self.labels = check_labels(root, img_list) # more check match mode: case 'train' | 'val': # check mask cache check_mask(root, img_list, config) # check iqa cache check_iqa(root, img_list, config) case _: # get max shape self.max_size = get_max_size(root, img_list) self.transform_fn = Resize(size=self.max_size) def __len__(self) -> int: return len(self.img_list) def __getitem__(self, index: int) -> dict: # choose get item method match self.mode: case 'train' | 'val': return self.train_val_item(index) case _: return self.pred_item(index) def train_val_item(self, index: int) -> dict: # image name, like '028.png' name = self.img_list[index] logging.debug(f'train-val mode: loading item {name}') # load infrared and visible ir = gray_read(self.root / 'ir' / name) vi, cbcr = ycbcr_read(self.root / 'vi' / name) # load mask mask = gray_read(self.root / 'mask' / name) # load information measurement ir_w = gray_read(self.root / 'iqa' / 'ir' / name) vi_w = gray_read(self.root / 'iqa' / 'vi' / name) # 
load label label_p = Path(name).stem + '.txt' labels = label_read(self.root / 'labels' / label_p) # concat images for transform(s) t = torch.cat([ir, vi, mask, ir_w, vi_w, cbcr], dim=0) # transform (resize) resize_fn = Resize(size=self.config.train.image_size) t = resize_fn(t) # transform (flip up-down) if random.random() < self.config.dataset.detect.flip_ud: t = vflip(t) if len(labels): labels[:, 2] = 1 - labels[:, 2] # transform (flip left-right) if random.random() < self.config.dataset.detect.flip_lr: t = hflip(t) if len(labels): labels[:, 1] = 1 - labels[:, 1] # transform labels (cls, x1, y1, x2, y2) -> (0, cls, ...) labels_o = torch.zeros((len(labels), 6)) if len(labels): labels_o[:, 1:] = labels # unpack images ir, vi, mask, ir_w, vi_w, cbcr = torch.split(t, [1, 1, 1, 1, 1, 2], dim=0) # merge data sample = { 'name': name, 'ir': ir, 'vi': vi, 'ir_w': ir_w, 'vi_w': vi_w, 'mask': mask, 'cbcr': cbcr, 'labels': labels_o } # return as expected return sample def pred_item(self, index: int) -> dict: # image name, like '028.png' name = self.img_list[index] logging.debug(f'pred mode: loading item {name}') # load infrared and visible ir = gray_read(self.root / 'ir' / name) vi, cbcr = ycbcr_read(self.root / 'vi' / name) # transform (resize) s = ir.shape[1:] t = torch.cat([ir, vi, cbcr], dim=0) ir, vi, cbcr = torch.split(self.transform_fn(t), [1, 1, 2], dim=0) # merge data sample = {'name': name, 'ir': ir, 'vi': vi, 'cbcr': cbcr, 'shape': s} # return as expected return sample @staticmethod def pred_save(fus: Tensor, names: List[str | Path], shape: List[Size], pred: Optional[Tensor] = None, save_txt: bool = False): if pred is None: return M3FD.pred_save_no_boxes(fus, names, shape) return M3FD.pred_save_with_boxes(fus, names, shape, pred, save_txt) @staticmethod def pred_save_no_boxes(fus: Tensor, names: List[str | Path], shape: List[Size]): for img_t, img_p, img_s in zip(fus, names, shape): img_t = resize(img_t, img_s) img_write(img_t, img_p) @staticmethod def pred_save_with_boxes(fus: Tensor, names: List[str | Path], shape: List[Size], pred: Tensor, save_txt: bool = False): for img_t, img_p, img_s, pred_i in zip(fus, names, shape, pred): # reshape target cur_s = img_t.shape[1:] scale_x, scale_y = cur_s[1] / img_s[1], cur_s[0] / img_s[0] pred_i[:, :4] *= Tensor([scale_x, scale_y, scale_x, scale_y]).to(pred_i.device) # reshape image img_t = resize(img_t, img_s) img = (img_t.clamp_(0, 1) * 255).to(torch.uint8) # draw bounding box pred_x = list(filter(lambda x: x[4] > 0.6, pred_i)) boxes = [x[:4] for x in pred_x] cls_idx = [int(x[5].cpu().numpy()) for x in pred_x] labels = [f'{M3FD.classes[cls]}: {x[4].cpu().numpy():.2f}' for cls, x in zip(cls_idx, pred_x)] colors = [M3FD.palette[cls] for cls, x in zip(cls_idx, pred_x)] if len(boxes): img = draw_bounding_boxes(img, torch.stack(boxes, dim=0), labels, colors, width=2) img = img.float() / 255 # save labeled images img_p = Path(img_p.parent) / 'images' / img_p.name img_write(img, img_p) # save label txt if save_txt: txt_p = Path(str(img_p.parent).replace('images', 'labels')) / (img_p.stem + '.txt') txt_p.unlink(missing_ok=True) txt_p.touch() pred_i[:, :4] /= Tensor([img_s[1], img_s[0], img_s[1], img_s[0]]).to(pred_i.device) pred_i[:, :4] = box_convert(pred_i[:, :4], 'xyxy', 'cxcywh') label_write(pred_i, txt_p) @staticmethod def collate_fn(data: List[dict]) -> dict: # keys keys = data[0].keys() # merge new_data = {} for key in keys: k_data = [d[key] for d in data] match key: case 'name' | 'shape': # (name, name) new_data[key] = k_data case 'labels': # 
(labels, image_index) for i, lb in enumerate(k_data): lb[:, 0] = i new_data[key] = torch.cat(k_data, dim=0) case _: # (img, img) new_data[key] = torch.stack(k_data, dim=0) # return as expected return new_data ================================================ FILE: loader/roadscene.py ================================================ import logging from pathlib import Path from typing import Literal, List import torch from kornia.geometry import resize from torch import Tensor, Size from torch.utils.data import Dataset from torchvision.transforms import Resize from config import ConfigDict from loader.utils.checker import check_mask, check_image, check_iqa, get_max_size from loader.utils.reader import gray_read, ycbcr_read, img_write class RoadScene(Dataset): type = 'fuse' # dataset type: 'fuse' or 'fuse & detect' color = True # dataset visible format: false -> 'gray' or true -> 'color' def __init__(self, root: str | Path, mode: Literal['train', 'val', 'pred'], config: ConfigDict): super().__init__() root = Path(root) self.root = root self.mode = mode # read corresponding list img_list = Path(root / 'meta' / f'{mode}.txt').read_text().splitlines() logging.info(f'load {len(img_list)} images from {root.name}') self.img_list = img_list # check images check_image(root, img_list) # more check match mode: case 'train' | 'val': # check mask cache check_mask(root, img_list, config) # check iqa cache check_iqa(root, img_list, config) case _: # get max shape self.max_size = get_max_size(root, img_list) # choose transform match mode: case 'train' | 'val': self.transform_fn = Resize(size=config.train.image_size) case _: self.transform_fn = Resize(size=self.max_size) def __len__(self) -> int: return len(self.img_list) def __getitem__(self, index: int) -> dict: # choose get item method match self.mode: case 'train' | 'val': return self.train_val_item(index) case _: return self.pred_item(index) def train_val_item(self, index: int) -> dict: # image name, like '003.png' name = self.img_list[index] logging.debug(f'train-val mode: loading item {name}') # load infrared and visible ir = gray_read(self.root / 'ir' / name) vi, cbcr = ycbcr_read(self.root / 'vi' / name) # load mask mask = gray_read(self.root / 'mask' / name) # load information measurement ir_w = gray_read(self.root / 'iqa' / 'ir' / name) vi_w = gray_read(self.root / 'iqa' / 'vi' / name) # transform (resize) t = torch.cat([ir, vi, mask, ir_w, vi_w, cbcr], dim=0) ir, vi, mask, ir_w, vi_w, cbcr = torch.split(self.transform_fn(t), [1, 1, 1, 1, 1, 2], dim=0) # merge data sample = {'name': name, 'ir': ir, 'vi': vi, 'ir_w': ir_w, 'vi_w': vi_w, 'mask': mask, 'cbcr': cbcr} # return as expected return sample def pred_item(self, index: int) -> dict: # image name, like '003.png' name = self.img_list[index] logging.debug(f'pred mode: loading item {name}') # load infrared and visible ir = gray_read(self.root / 'ir' / name) vi, cbcr = ycbcr_read(self.root / 'vi' / name) # transform (resize) s = ir.shape[1:] t = torch.cat([ir, vi, cbcr], dim=0) ir, vi, cbcr = torch.split(self.transform_fn(t), [1, 1, 2], dim=0) # merge data sample = {'name': name, 'ir': ir, 'vi': vi, 'cbcr': cbcr, 'shape': s} # return as expected return sample @staticmethod def pred_save(fus: Tensor, names: List[str | Path], shape: List[Size]): for img_t, img_p, img_s in zip(fus, names, shape): img_t = resize(img_t, img_s) img_write(img_t, img_p) @staticmethod def collate_fn(data: List[dict]) -> dict: # keys keys = data[0].keys() # merge new_data = {} for key in keys: k_data = [d[key] for d in data] 
new_data[key] = k_data if isinstance(k_data[0], str) or isinstance(k_data[0], Size) else torch.stack(k_data) # return as expected return new_data ================================================ FILE: loader/tno.py ================================================ import logging from pathlib import Path from typing import Literal, List import torch from kornia.geometry import resize from torch import Tensor, Size from torch.utils.data import Dataset from torchvision.transforms import Resize from config import ConfigDict from loader.utils.checker import check_mask, check_image, check_iqa, get_max_size from loader.utils.reader import gray_read, img_write class TNO(Dataset): type = 'fuse' # dataset type: 'fuse' or 'fuse & detect' color = False # dataset visible format: false -> 'gray' or true -> 'color' def __init__(self, root: str | Path, mode: Literal['train', 'val', 'pred'], config: ConfigDict): super().__init__() root = Path(root) self.root = root self.mode = mode # read corresponding list img_list = Path(root / 'meta' / f'{mode}.txt').read_text().splitlines() logging.info(f'load {len(img_list)} images from {root.name}') self.img_list = img_list # check images check_image(root, img_list) # more check match mode: case 'train' | 'val': # check mask cache check_mask(root, img_list, config) # check iqa cache check_iqa(root, img_list, config) case _: # get max shape self.max_size = get_max_size(root, img_list) # choose transform match mode: case 'train' | 'val': self.transform_fn = Resize(size=config.train.image_size) case _: self.transform_fn = Resize(size=self.max_size) def __len__(self) -> int: return len(self.img_list) def __getitem__(self, index: int) -> dict: # choose get item method match self.mode: case 'train' | 'val': return self.train_val_item(index) case _: return self.pred_item(index) def train_val_item(self, index: int) -> dict: # image name, like '028.png' name = self.img_list[index] logging.debug(f'train-val mode: loading item {name}') # load infrared and visible ir = gray_read(self.root / 'ir' / name) vi = gray_read(self.root / 'vi' / name) # load mask mask = gray_read(self.root / 'mask' / name) # load information measurement ir_w = gray_read(self.root / 'iqa' / 'ir' / name) vi_w = gray_read(self.root / 'iqa' / 'vi' / name) # transform (resize) t = torch.cat([ir, vi, mask, ir_w, vi_w], dim=0) ir, vi, mask, ir_w, vi_w = torch.chunk(self.transform_fn(t), chunks=5, dim=0) # merge data sample = {'name': name, 'ir': ir, 'vi': vi, 'ir_w': ir_w, 'vi_w': vi_w, 'mask': mask} # return as expected return sample def pred_item(self, index: int) -> dict: # image name, like '028.png' name = self.img_list[index] logging.debug(f'pred mode: loading item {name}') # load infrared and visible ir = gray_read(self.root / 'ir' / name) vi = gray_read(self.root / 'vi' / name) # transform (resize) s = ir.shape[1:] t = torch.cat([ir, vi], dim=0) ir, vi = torch.chunk(self.transform_fn(t), chunks=2, dim=0) # merge data sample = {'name': name, 'ir': ir, 'vi': vi, 'shape': s} # return as expected return sample @staticmethod def pred_save(fus: Tensor, names: List[str | Path], shape: List[Size]): for img_t, img_p, img_s in zip(fus, names, shape): img_t = resize(img_t, img_s) img_write(img_t, img_p) @staticmethod def collate_fn(data: List[dict]) -> dict: # keys keys = data[0].keys() # merge new_data = {} for key in keys: k_data = [d[key] for d in data] new_data[key] = k_data if isinstance(k_data[0], str) or isinstance(k_data[0], Size) else torch.stack(k_data) # return as expected return new_data 
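The three built-in loaders above share a constructor signature of `(root, mode, config)` and each exposes a `collate_fn` tailored to its sample dict, so they drop straight into a `torch.utils.data.DataLoader`. Below is a minimal wiring sketch; the config path and the choice of `TNO` are illustrative, and it assumes the usual `dataset` and `train` fields from the shipped configs.

```python
# Minimal sketch: feed one of the built-in loaders into a torch DataLoader.
# 'config/default.yaml' and the TNO choice are illustrative, assuming
# config.dataset.root points at a TNO-style ir/ vi/ meta/ layout.
from pathlib import Path

import yaml
from torch.utils.data import DataLoader

from config import from_dict
from loader import TNO

config = from_dict(yaml.safe_load(Path('config/default.yaml').open('r')))

train_set = TNO(root=config.dataset.root, mode='train', config=config)
train_loader = DataLoader(
    train_set,
    batch_size=config.train.batch_size,
    shuffle=True,
    num_workers=config.train.num_workers,
    collate_fn=TNO.collate_fn,  # keeps 'name' as a list, stacks image tensors
)

for batch in train_loader:
    ir, vi = batch['ir'], batch['vi']  # (B, 1, H, W) after Resize to train.image_size
    break
```

`M3FD.collate_fn` follows the same pattern but also concatenates the per-image `labels` tensors, writing the batch index into column 0 so detection targets stay associated with their image.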
================================================ FILE: loader/utils/__init__.py ================================================ ================================================ FILE: loader/utils/checker.py ================================================ import logging import sys from pathlib import Path from typing import List from torch import Tensor, Size from tqdm import tqdm from config import ConfigDict from loader.utils.reader import label_read, gray_read from pipeline.iqa import IQA from pipeline.saliency import Saliency def check_image(root: Path, img_list: List[str]): assert (root / 'ir').exists() and (root / 'vi').exists(), f'ir and vi folders are required' for img_name in img_list: if not (root / 'ir' / img_name).exists() or not (root / 'vi' / img_name).exists(): logging.fatal(f'empty img {img_name} in {root.name}') sys.exit(1) logging.info('find all images on list') def check_iqa(root: Path, img_list: List[str], config: ConfigDict): iqa_cache = True if (root / 'iqa').exists(): for img_name in img_list: if not (root / 'iqa' / 'ir' / img_name).exists() or not (root / 'iqa' / 'vi' / img_name).exists(): iqa_cache = False break else: iqa_cache = False if iqa_cache: logging.info(f'find iqa cache in folder, skip information measurement') else: logging.info(f'find no iqa cache in folder, start information measurement') iqa = IQA(url=config.iqa.url) iqa.inference(src=root, dst=root / 'iqa') def check_labels(root: Path, img_list: List[str]) -> List[Tensor]: assert (root / 'labels').exists(), f'labels folder is required' labels = [] for img_name in img_list: label_name = Path(img_name).stem + '.txt' if not (root / 'labels' / label_name).exists(): logging.fatal(f'empty label {label_name} in {root.name}') sys.exit(1) labels.append(label_read(root / 'labels' / label_name)) logging.info('find all labels on list') return labels def check_mask(root: Path, img_list: List[str], config: ConfigDict): mask_cache = True if (root / 'mask').exists(): for img_name in img_list: if not (root / 'mask' / img_name).exists(): mask_cache = False break else: mask_cache = False if mask_cache: logging.info('find mask cache in folder, skip saliency detection') else: logging.info('find no mask cache in folder, start saliency detection') saliency = Saliency(url=config.saliency.url) saliency.inference(src=root / 'ir', dst=root / 'mask') def get_max_size(root: Path, img_list: List[str]): max_h, max_w = -1, -1 logging.info('find suitable size for prediction') img_l = tqdm(img_list) for img_name in img_l: img_l.set_description('finding suitable size') img = gray_read(root / 'ir' / img_name) max_h = max(max_h, img.shape[1]) max_w = max(max_w, img.shape[2]) logging.info(f'max size in dataset: H:{max_h} x W:{max_w}') return Size((max_h, max_w)) ================================================ FILE: loader/utils/reader.py ================================================ from pathlib import Path from typing import Tuple import cv2 import numpy import torch from kornia import image_to_tensor, tensor_to_image from kornia.color import rgb_to_ycbcr, bgr_to_rgb, rgb_to_bgr from torch import Tensor from torchvision.ops import box_convert def gray_read(img_path: str | Path) -> Tensor: img_n = cv2.imread(str(img_path), cv2.IMREAD_GRAYSCALE) img_t = image_to_tensor(img_n).float() / 255 return img_t def ycbcr_read(img_path: str | Path) -> Tuple[Tensor, Tensor]: img_n = cv2.imread(str(img_path), cv2.IMREAD_COLOR) img_t = image_to_tensor(img_n).float() / 255 img_t = rgb_to_ycbcr(bgr_to_rgb(img_t)) y, cbcr = torch.split(img_t, [1, 2], 
dim=0) return y, cbcr def label_read(label_path: str | Path) -> Tensor: target = numpy.loadtxt(str(label_path), dtype=numpy.float32) labels = torch.from_numpy(target).view(-1, 5) # (cls, cx, cy, w, h) labels[:, 1:] = box_convert(labels[:, 1:], 'cxcywh', 'xyxy') # (cls, x1, y1, x2, y2) return labels def img_write(img_t: Tensor, img_path: str | Path): if img_t.shape[0] == 3: img_t = rgb_to_bgr(img_t) img_n = tensor_to_image(img_t.squeeze().cpu()) * 255 cv2.imwrite(str(img_path), img_n) def label_write(pred_i: Tensor, txt_path: str | Path): for *pos, conf, cls in pred_i.tolist(): line = (cls, *pos, conf) with txt_path.open('a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') ================================================ FILE: module/__init__.py ================================================ ================================================ FILE: module/detect/README.md ================================================ # Detect Based on YOLOv5. Reference: [YOLOv5 official](https://github.com/ultralytics/yolov5) ================================================ FILE: module/detect/models/__init__.py ================================================ ================================================ FILE: module/detect/models/common.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Common modules """ import json import os import platform import sys import warnings from collections import OrderedDict, namedtuple from copy import copy from pathlib import Path import cv2 import math import numpy as np import pandas as pd import requests import torch import torch.nn as nn import yaml from PIL import Image from torch.cuda import amp FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH if platform.system() != 'Windows': ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from utils.dataloaders import exif_transpose, letterbox from utils.general import (LOGGER, check_requirements, check_suffix, check_version, colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, time_sync def autopad(k, p=None): # kernel, padding # Pad to 'same' if p is None: p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad return p class Conv(nn.Module): # Standard convolution def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) self.bn = nn.BatchNorm2d(c2) self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) def forward(self, x): return self.act(self.bn(self.conv(x))) def forward_fuse(self, x): return self.act(self.conv(x)) class DWConv(Conv): # Depth-wise convolution class def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) class DWConvTranspose2d(nn.ConvTranspose2d): # Depth-wise transpose convolution class def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2)) class TransformerLayer(nn.Module): # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) def 
__init__(self, c, num_heads): super().__init__() self.q = nn.Linear(c, c, bias=False) self.k = nn.Linear(c, c, bias=False) self.v = nn.Linear(c, c, bias=False) self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) self.fc1 = nn.Linear(c, c, bias=False) self.fc2 = nn.Linear(c, c, bias=False) def forward(self, x): x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x x = self.fc2(self.fc1(x)) + x return x class TransformerBlock(nn.Module): # Vision Transformer https://arxiv.org/abs/2010.11929 def __init__(self, c1, c2, num_heads, num_layers): super().__init__() self.conv = None if c1 != c2: self.conv = Conv(c1, c2) self.linear = nn.Linear(c2, c2) # learnable position embedding self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers))) self.c2 = c2 def forward(self, x): if self.conv is not None: x = self.conv(x) b, _, w, h = x.shape p = x.flatten(2).permute(2, 0, 1) return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h) class Bottleneck(nn.Module): # Standard bottleneck def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c_, c2, 3, 1, g=g) self.add = shortcut and c1 == c2 def forward(self, x): return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) class BottleneckCSP(nn.Module): # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) self.cv4 = Conv(2 * c_, c2, 1, 1) self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) self.act = nn.SiLU() self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) def forward(self, x): y1 = self.cv3(self.m(self.cv1(x))) y2 = self.cv2(x) return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) class CrossConv(nn.Module): # Cross Convolution Downsample def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): # ch_in, ch_out, kernel, stride, groups, expansion, shortcut super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, (1, k), (1, s)) self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) self.add = shortcut and c1 == c2 def forward(self, x): return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) class C3(nn.Module): # CSP Bottleneck with 3 convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c1, c_, 1, 1) self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) def forward(self, x): return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) class C3x(C3): # C3 module with cross-convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): super().__init__(c1, c2, n, shortcut, g, e) c_ = int(c2 * e) self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) class C3TR(C3): # C3 module with TransformerBlock() def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): super().__init__(c1, c2, n, shortcut, g, e) c_ = int(c2 * e) self.m = TransformerBlock(c_, c_, 4, n) 
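# --- Editor's sketch (not part of the upstream YOLOv5 file) -------------------
# How C3TR/TransformerBlock process a feature map: the (b, c, h, w) tensor is
# flattened into h*w tokens, a learned position term (a Linear projection of the
# tokens) is added, the tokens pass through `num_layers` TransformerLayer
# self-attention blocks, and the result is reshaped back to the original spatial
# size. A minimal, hedged smoke test assuming only the class definitions and the
# module-level `import torch` above; guarded so importing common.py is unchanged.
if __name__ == '__main__':
    _x = torch.zeros(1, 64, 20, 20)                        # dummy P5-level feature map
    _blk = TransformerBlock(c1=64, c2=64, num_heads=4, num_layers=2)
    _y = _blk(_x)                                          # 20 * 20 = 400 tokens of dim 64
    assert _y.shape == _x.shape                            # shape preserved when c1 == c2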
class C3SPP(C3): # C3 module with SPP() def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): super().__init__(c1, c2, n, shortcut, g, e) c_ = int(c2 * e) self.m = SPP(c_, c_, k) class C3Ghost(C3): # C3 module with GhostBottleneck() def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): super().__init__(c1, c2, n, shortcut, g, e) c_ = int(c2 * e) # hidden channels self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n))) class SPP(nn.Module): # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 def __init__(self, c1, c2, k=(5, 9, 13)): super().__init__() c_ = c1 // 2 # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) def forward(self, x): x = self.cv1(x) with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) class SPPF(nn.Module): # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) super().__init__() c_ = c1 // 2 # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c_ * 4, c2, 1, 1) self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) def forward(self, x): x = self.cv1(x) with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning y1 = self.m(x) y2 = self.m(y1) return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) class Focus(nn.Module): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups super().__init__() self.conv = Conv(c1 * 4, c2, k, s, p, g, act) # self.contract = Contract(gain=2) def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) # return self.conv(self.contract(x)) class GhostConv(nn.Module): # Ghost Convolution https://github.com/huawei-noah/ghostnet def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups super().__init__() c_ = c2 // 2 # hidden channels self.cv1 = Conv(c1, c_, k, s, None, g, act) self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) def forward(self, x): y = self.cv1(x) return torch.cat((y, self.cv2(y)), 1) class GhostBottleneck(nn.Module): # Ghost Bottleneck https://github.com/huawei-noah/ghostnet def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride super().__init__() c_ = c2 // 2 self.conv = nn.Sequential( GhostConv(c1, c_, 1, 1), # pw DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw GhostConv(c_, c2, 1, 1, act=False) ) # pw-linear self.shortcut = nn.Sequential( DWConv(c1, c1, k, s, act=False), Conv( c1, c2, 1, 1, act=False ) ) if s == 2 else nn.Identity() def forward(self, x): return self.conv(x) + self.shortcut(x) class Contract(nn.Module): # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) def __init__(self, gain=2): super().__init__() self.gain = gain def forward(self, x): b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain' s = self.gain x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2) x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40) class Expand(nn.Module): # Expand channels into width-height, i.e. 
x(1,64,80,80) to x(1,16,160,160) def __init__(self, gain=2): super().__init__() self.gain = gain def forward(self, x): b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' s = self.gain x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80) x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160) class Concat(nn.Module): # Concatenate a list of tensors along dimension def __init__(self, dimension=1): super().__init__() self.d = dimension def forward(self, x): return torch.cat(x, self.d) class DetectMultiBackend(nn.Module): # YOLOv5 MultiBackend class for python inference on various backends def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True): # Usage: # PyTorch: weights = *.pt # TorchScript: *.torchscript # ONNX Runtime: *.onnx # ONNX OpenCV DNN: *.onnx with --dnn # OpenVINO: *.xml # CoreML: *.mlmodel # TensorRT: *.engine # TensorFlow SavedModel: *_saved_model # TensorFlow GraphDef: *.pb # TensorFlow Lite: *.tflite # TensorFlow Edge TPU: *_edgetpu.tflite from models.experimental import attempt_download, attempt_load # scoped to avoid circular import super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(w) # get backend w = attempt_download(w) # download if not local fp16 &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 stride, names = 32, [f'class{i}' for i in range(1000)] # assign defaults if data: # assign class names (optional) with open(data, errors='ignore') as f: names = yaml.safe_load(f)['names'] if pt: # PyTorch model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse) stride = max(int(model.stride.max()), 32) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names model.half() if fp16 else model.float() self.model = model # explicitly assign for to(), cpu(), cuda(), half() elif jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') extra_files = {'config.txt': ''} # model metadata model = torch.jit.load(w, _extra_files=extra_files) model.half() if fp16 else model.float() if extra_files['config.txt']: d = json.loads(extra_files['config.txt']) # extra_files dict stride, names = int(d['stride']), d['names'] elif dnn: # ONNX OpenCV DNN LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') check_requirements(('opencv-python>=4.5.4',)) net = cv2.dnn.readNetFromONNX(w) elif onnx: # ONNX Runtime LOGGER.info(f'Loading {w} for ONNX Runtime inference...') cuda = torch.cuda.is_available() check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) import onnxruntime providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] session = onnxruntime.InferenceSession(w, providers=providers) meta = session.get_modelmeta().custom_metadata_map # metadata if 'stride' in meta: stride, names = int(meta['stride']), eval(meta['names']) elif xml: # OpenVINO LOGGER.info(f'Loading {w} for OpenVINO inference...') check_requirements(('openvino',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ from openvino.runtime import Core, Layout, get_batch ie = Core() if not Path(w).is_file(): # if not *.xml w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir network = ie.read_model(model=w, 
weights=Path(w).with_suffix('.bin')) if network.get_parameters()[0].get_layout().empty: network.get_parameters()[0].set_layout(Layout("NCHW")) batch_dim = get_batch(network) if batch_dim.is_static: batch_size = batch_dim.get_length() executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 output_layer = next(iter(executable_network.outputs)) meta = Path(w).with_suffix('.yaml') if meta.exists(): stride, names = self._load_metadata(meta) # load metadata elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: model = runtime.deserialize_cuda_engine(f.read()) bindings = OrderedDict() fp16 = False # default updated below for index in range(model.num_bindings): name = model.get_binding_name(index) dtype = trt.nptype(model.get_binding_dtype(index)) shape = tuple(model.get_binding_shape(index)) data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) if model.binding_is_input(index) and dtype == np.float16: fp16 = True binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) context = model.create_execution_context() batch_size = bindings['images'].shape[0] elif coreml: # CoreML LOGGER.info(f'Loading {w} for CoreML inference...') import coremltools as ct model = ct.models.MLModel(w) else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) if saved_model: # SavedModel LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') import tensorflow as tf keras = False # assume TF1 saved_model model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') import tensorflow as tf def wrap_frozen_graph(gd, inputs, outputs): x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped ge = x.graph.as_graph_element return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) gd = tf.Graph().as_graph_def() # graph_def with open(w, 'rb') as f: gd.ParseFromString(f.read()) frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate except ImportError: import tensorflow as tf Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, if edgetpu: # Edge TPU https://coral.ai/software/#edgetpu-runtime LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') delegate = { 'Linux': 'libedgetpu.so.1', 'Darwin': 'libedgetpu.1.dylib', 'Windows': 'edgetpu.dll'}[platform.system()] interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) else: # Lite LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') interpreter = Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # 
inputs output_details = interpreter.get_output_details() # outputs elif tfjs: raise Exception('ERROR: YOLOv5 TF.js inference is not supported') else: raise Exception(f'ERROR: {w} is not a supported format') self.__dict__.update(locals()) # assign all variables to self def forward(self, im, augment=False, visualize=False, val=False): # YOLOv5 MultiBackend inference b, ch, h, w = im.shape # batch, channel, height, width if self.fp16 and im.dtype != torch.float16: im = im.half() # to FP16 if self.pt: # PyTorch y = self.model(im, augment=augment, visualize=visualize)[0] elif self.jit: # TorchScript y = self.model(im)[0] elif self.dnn: # ONNX OpenCV DNN im = im.cpu().numpy() # torch to numpy self.net.setInput(im) y = self.net.forward() elif self.onnx: # ONNX Runtime im = im.cpu().numpy() # torch to numpy y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] elif self.xml: # OpenVINO im = im.cpu().numpy() # FP32 y = self.executable_network([im])[self.output_layer] elif self.engine: # TensorRT assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape) self.binding_addrs['images'] = int(im.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) y = self.bindings['output'].data elif self.coreml: # CoreML im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) im = Image.fromarray((im[0] * 255).astype('uint8')) # im = im.resize((192, 320), Image.ANTIALIAS) y = self.model.predict({'image': im}) # coordinates are xywh normalized if 'confidence' in y: box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) else: k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key y = y[k] # output else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel y = (self.model(im, training=False) if self.keras else self.model(im)).numpy() elif self.pb: # GraphDef y = self.frozen_func(x=self.tf.constant(im)).numpy() else: # Lite or Edge TPU input, output = self.input_details[0], self.output_details[0] int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model if int8: scale, zero_point = input['quantization'] im = (im / scale + zero_point).astype(np.uint8) # de-scale self.interpreter.set_tensor(input['index'], im) self.interpreter.invoke() y = self.interpreter.get_tensor(output['index']) if int8: scale, zero_point = output['quantization'] y = (y.astype(np.float32) - zero_point) * scale # re-scale y[..., :4] *= [w, h, w, h] # xywh normalized to pixels if isinstance(y, np.ndarray): y = torch.tensor(y, device=self.device) return (y, []) if val else y def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb if any(warmup_types) and self.device.type != 'cpu': im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input for _ in range(2 if self.jit else 1): # self.forward(im) # warmup @staticmethod def model_type(p='path/to/model.pt'): # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx from export import export_formats suffixes = list(export_formats().Suffix) + ['.xml'] # export suffixes check_suffix(p, suffixes) # checks p = Path(p).name # eliminate trailing separators pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, xml2 = (s in p for s in suffixes) xml |= xml2 # *_openvino_model or *.xml tflite &= not edgetpu # *.tflite return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs @staticmethod def _load_metadata(f='path/to/meta.yaml'): # Load metadata from meta.yaml if it exists with open(f, errors='ignore') as f: d = yaml.safe_load(f) return d['stride'], d['names'] # assign stride, names class AutoShape(nn.Module): # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS conf = 0.25 # NMS confidence threshold iou = 0.45 # NMS IoU threshold agnostic = False # NMS class-agnostic multi_label = False # NMS multiple labels per box classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs max_det = 1000 # maximum number of detections per image amp = False # Automatic Mixed Precision (AMP) inference def __init__(self, model, verbose=True): super().__init__() if verbose: LOGGER.info('Adding AutoShape... ') copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance self.pt = not self.dmb or model.pt # PyTorch model self.model = model.eval() def _apply(self, fn): # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers self = super()._apply(fn) if self.pt: m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() m.stride = fn(m.stride) m.grid = list(map(fn, m.grid)) if isinstance(m.anchor_grid, list): m.anchor_grid = list(map(fn, m.anchor_grid)) return self @torch.no_grad() def forward(self, imgs, size=640, augment=False, profile=False): # Inference from various sources. For height=640, width=1280, RGB images example inputs are: # file: imgs = 'data/images/zidane.jpg' # str or PosixPath # URI: = 'https://ultralytics.com/images/zidane.jpg' # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3) # numpy: = np.zeros((640,1280,3)) # HWC # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images t = [time_sync()] p = next(self.model.parameters()) if self.pt else torch.zeros(1, device=self.model.device) # for device, type autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference if isinstance(imgs, torch.Tensor): # torch with amp.autocast(autocast): return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference # Pre-process n, imgs = (len(imgs), list(imgs)) if isinstance(imgs, (list, tuple)) else (1, [imgs]) # number, list of images shape0, shape1, files = [], [], [] # image and inference shapes, filenames for i, im in enumerate(imgs): f = f'image{i}' # filename if isinstance(im, (str, Path)): # filename or uri im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im im = np.asarray(exif_transpose(im)) elif isinstance(im, Image.Image): # PIL Image im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input s = im.shape[:2] # HWC shape0.append(s) # image shape g = (size / max(s)) # gain shape1.append([y * g for y in s]) imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape x = [letterbox(im, shape1, auto=False)[0] for im in imgs] # pad x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 t.append(time_sync()) with amp.autocast(autocast): # Inference y = self.model(x, augment, profile) # forward t.append(time_sync()) # Post-process y = non_max_suppression( y if self.dmb else y[0], self.conf, self.iou, self.classes, self.agnostic, self.multi_label, max_det=self.max_det ) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) t.append(time_sync()) return Detections(imgs, y, files, t, self.names, x.shape) class Detections: # YOLOv5 detections class for inference results def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None): super().__init__() d = pred[0].device # device gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs] # normalizations self.imgs = imgs # list of images as numpy arrays self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.names = names # class names self.files = files # image filenames self.times = times # profiling times self.xyxy = pred # xyxy pixels self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized self.n = len(self.pred) # number of images (batch size) self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) self.s = shape # inference BCHW shape def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): crops = [] for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string if pred.shape[0]: for c in pred[:, -1].unique(): n = (pred[:, -1] == c).sum() # detections per class s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render or crop: annotator = 
Annotator(im, example=str(self.names)) for *box, conf, cls in reversed(pred): # xyxy, confidence, class label = f'{self.names[int(cls)]} {conf:.2f}' if crop: file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None crops.append( { 'box': box, 'conf': conf, 'cls': cls, 'label': label, 'im': save_one_box(box, im, file=file, save=save)} ) else: # all others annotator.box_label(box, label if labels else '', color=colors(cls)) im = annotator.im else: s += '(no detections)' im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if pprint: print(s.rstrip(', ')) if show: im.show(self.files[i]) # show if save: f = self.files[i] im.save(save_dir / f) # save if i == self.n - 1: LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") if render: self.imgs[i] = np.asarray(im) if crop: if save: LOGGER.info(f'Saved results to {save_dir}\n') return crops def print(self): self.display(pprint=True) # print results print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) def show(self, labels=True): self.display(show=True, labels=labels) # show results def save(self, labels=True, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir self.display(save=True, labels=labels, save_dir=save_dir) # save results def crop(self, save=True, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None return self.display(crop=True, save=save, save_dir=save_dir) # crop results def render(self, labels=True): self.display(render=True, labels=labels) # render results return self.imgs def pandas(self): # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) new = copy(self) # return copy ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) return new def tolist(self): # return a list of Detections objects, i.e. 'for result in results.tolist():' r = range(self.n) # iterable x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] # for d in x: # for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: # setattr(d, k, getattr(d, k)[0]) # pop out of list return x def __len__(self): return self.n # override len(results) def __str__(self): self.print() # override print(results) return '' class Classify(nn.Module): # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups super().__init__() self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) self.flat = nn.Flatten() def forward(self, x): z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list return self.flat(self.conv(z)) # flatten to x(b,c2) ================================================ FILE: module/detect/models/experimental.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Experimental modules """ import math import numpy as np import torch import torch.nn as nn from models.common import Conv from utils.downloads import attempt_download class Sum(nn.Module): # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 def __init__(self, n, weight=False): # n: number of inputs super().__init__() self.weight = weight # apply weights boolean self.iter = range(n - 1) # iter object if weight: self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True) # layer weights def forward(self, x): y = x[0] # no weight if self.weight: w = torch.sigmoid(self.w) * 2 for i in self.iter: y = y + x[i + 1] * w[i] else: for i in self.iter: y = y + x[i + 1] return y class MixConv2d(nn.Module): # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy super().__init__() n = len(k) # number of convolutions if equal_ch: # equal c_ per group i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices c_ = [(i == g).sum() for g in range(n)] # intermediate channels else: # equal weight.numel() per group b = [c2] + [0] * n a = np.eye(n + 1, n, k=-1) a -= np.roll(a, 1, axis=1) a *= np.array(k) ** 2 a[0] = 1 c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b self.m = nn.ModuleList([ nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) self.bn = nn.BatchNorm2d(c2) self.act = nn.SiLU() def forward(self, x): return self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) class Ensemble(nn.ModuleList): # Ensemble of models def __init__(self): super().__init__() def forward(self, x, augment=False, profile=False, visualize=False): y = [module(x, augment, profile, visualize)[0] for module in self] # y = torch.stack(y).max(0)[0] # max ensemble # y = torch.stack(y).mean(0) # mean ensemble y = torch.cat(y, 1) # nms ensemble return y, None # inference, train output def attempt_load(weights, device=None, inplace=True, fuse=True): # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a from models.yolo import Detect, Model model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: ckpt = torch.load(attempt_download(w), map_location='cpu') # load ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model model.append(ckpt.fuse().eval() if fuse else ckpt.eval()) # fused or un-fused model in eval mode # Compatibility updates for m in model.modules(): t = type(m) if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): m.inplace = inplace # torch 1.7.0 compatibility if t is Detect and not isinstance(m.anchor_grid, list): delattr(m, 'anchor_grid') setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) elif t is Conv: m._non_persistent_buffers_set = set() # torch 1.6.0 
compatibility elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): m.recompute_scale_factor = None # torch 1.11.0 compatibility if len(model) == 1: return model[-1] # return model print(f'Ensemble created with {weights}\n') for k in 'names', 'nc', 'yaml': setattr(model, k, getattr(model[0], k)) model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}' return model # return ensemble ================================================ FILE: module/detect/models/hub/anchors.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Default anchors for COCO data # P5 ------------------------------------------------------------------------------------------------------------------- # P5-640: anchors_p5_640: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 # P6 ------------------------------------------------------------------------------------------------------------------- # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 anchors_p6_640: - [9,11, 21,19, 17,41] # P3/8 - [43,32, 39,70, 86,64] # P4/16 - [65,131, 134,130, 120,265] # P5/32 - [282,180, 247,354, 512,387] # P6/64 # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 anchors_p6_1280: - [19,27, 44,40, 38,94] # P3/8 - [96,68, 86,152, 180,137] # P4/16 - [140,301, 303,264, 238,542] # P5/32 - [436,615, 739,380, 925,792] # P6/64 # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 anchors_p6_1920: - [28,41, 67,59, 57,141] # P3/8 - [144,103, 129,227, 270,205] # P4/16 - [209,452, 455,396, 358,812] # P5/32 - [653,922, 1109,570, 1387,1187] # P6/64 # P7 ------------------------------------------------------------------------------------------------------------------- # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 anchors_p7_640: - [11,11, 13,30, 29,20] # P3/8 - [30,46, 61,38, 39,92] # P4/16 - [78,80, 146,66, 79,163] # P5/32 - [149,150, 321,143, 157,303] # P6/64 - [257,402, 359,290, 524,372] # P7/128 # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 anchors_p7_1280: - [19,22, 54,36, 32,77] # P3/8 - [70,83, 138,71, 75,173] # P4/16 - [165,159, 148,334, 375,151] # P5/32 - [334,317, 251,626, 499,474] # P6/64 - [750,326, 534,814, 1079,818] # P7/128 # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 
749,711, 1126,489, 801,1222, 1618,1227 anchors_p7_1920: - [29,34, 81,55, 47,115] # P3/8 - [105,124, 207,107, 113,259] # P4/16 - [247,238, 222,500, 563,227] # P5/32 - [501,476, 376,939, 749,711] # P6/64 - [1126,489, 801,1222, 1618,1227] # P7/128 ================================================ FILE: module/detect/models/hub/yolov3-spp.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 # darknet53 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [32, 3, 1]], # 0 [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 [-1, 1, Bottleneck, [64]], [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 [-1, 2, Bottleneck, [128]], [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 [-1, 8, Bottleneck, [256]], [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 [-1, 8, Bottleneck, [512]], [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 [-1, 4, Bottleneck, [1024]], # 10 ] # YOLOv3-SPP head head: [[-1, 1, Bottleneck, [1024, False]], [-1, 1, SPP, [512, [5, 9, 13]]], [-1, 1, Conv, [1024, 3, 1]], [-1, 1, Conv, [512, 1, 1]], [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) [-2, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 8], 1, Concat, [1]], # cat backbone P4 [-1, 1, Bottleneck, [512, False]], [-1, 1, Bottleneck, [512, False]], [-1, 1, Conv, [256, 1, 1]], [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) [-2, 1, Conv, [128, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P3 [-1, 1, Bottleneck, [256, False]], [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] ================================================ FILE: module/detect/models/hub/yolov3-tiny.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - [10,14, 23,27, 37,58] # P4/16 - [81,82, 135,169, 344,319] # P5/32 # YOLOv3-tiny backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [16, 3, 1]], # 0 [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 [-1, 1, Conv, [32, 3, 1]], [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 [-1, 1, Conv, [64, 3, 1]], [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 [-1, 1, Conv, [128, 3, 1]], [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 [-1, 1, Conv, [256, 3, 1]], [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 [-1, 1, Conv, [512, 3, 1]], [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 ] # YOLOv3-tiny head head: [[-1, 1, Conv, [1024, 3, 1]], [-1, 1, Conv, [256, 1, 1]], [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) [-2, 1, Conv, [128, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 8], 1, Concat, [1]], # cat backbone P4 [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) ] ================================================ FILE: module/detect/models/hub/yolov3.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 # darknet53 
backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [32, 3, 1]], # 0 [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 [-1, 1, Bottleneck, [64]], [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 [-1, 2, Bottleneck, [128]], [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 [-1, 8, Bottleneck, [256]], [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 [-1, 8, Bottleneck, [512]], [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 [-1, 4, Bottleneck, [1024]], # 10 ] # YOLOv3 head head: [[-1, 1, Bottleneck, [1024, False]], [-1, 1, Conv, [512, 1, 1]], [-1, 1, Conv, [1024, 3, 1]], [-1, 1, Conv, [512, 1, 1]], [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) [-2, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 8], 1, Concat, [1]], # cat backbone P4 [-1, 1, Bottleneck, [512, False]], [-1, 1, Bottleneck, [512, False]], [-1, 1, Conv, [256, 1, 1]], [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) [-2, 1, Conv, [128, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P3 [-1, 1, Bottleneck, [256, False]], [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] ================================================ FILE: module/detect/models/hub/yolov5-bifpn.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 9 ] # YOLOv5 v6.0 BiFPN head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 13 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 17 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change [-1, 3, C3, [512, False]], # 20 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 10], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] ================================================ FILE: module/detect/models/hub/yolov5-fpn.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 9 ] # YOLOv5 v6.0 FPN head head: [[-1, 3, C3, [1024, False]], # 10 (P5/32-large) [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 
[-1, 1, Conv, [512, 1, 1]], [-1, 3, C3, [512, False]], # 14 (P4/16-medium) [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 1, Conv, [256, 1, 1]], [-1, 3, C3, [256, False]], # 18 (P3/8-small) [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] ================================================ FILE: module/detect/models/hub/yolov5-p2.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: 3 # AutoAnchor evolves 3 anchors per P output layer # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 9 ] # YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 13 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 17 (P3/8-small) [-1, 1, Conv, [128, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 2], 1, Concat, [1]], # cat backbone P2 [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall) [-1, 1, Conv, [128, 3, 2]], [[-1, 18], 1, Concat, [1]], # cat head P3 [-1, 3, C3, [256, False]], # 24 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 14], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, False]], # 27 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 10], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [1024, False]], # 30 (P5/32-large) [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5) ] ================================================ FILE: module/detect/models/hub/yolov5-p34.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple anchors: 3 # AutoAnchor evolves 3 anchors per P output layer # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2 [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 [ -1, 3, C3, [ 128 ] ], [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 [ -1, 6, C3, [ 256 ] ], [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 [ -1, 9, C3, [ 512 ] ], [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 [ -1, 3, C3, [ 1024 ] ], [ -1, 1, SPPF, [ 1024, 5 ] ], # 9 ] # YOLOv5 v6.0 head with (P3, P4) outputs head: [ [ -1, 1, Conv, [ 512, 1, 1 ] ], [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 [ -1, 3, C3, [ 512, False ] ], # 13 [ -1, 1, Conv, [ 256, 1, 1 ] ], [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) [ -1, 1, Conv, [ 256, 3, 2 ] ], [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium) [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4) ] ================================================ FILE: module/detect/models/hub/yolov5-p6.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 
license # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: 3 # AutoAnchor evolves 3 anchors per P output layer # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 [-1, 3, C3, [768]], [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 11 ] # YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs head: [[-1, 1, Conv, [768, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 8], 1, Concat, [1]], # cat backbone P5 [-1, 3, C3, [768, False]], # 15 [-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 19 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 23 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 20], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, False]], # 26 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 16], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [768, False]], # 29 (P5/32-large) [-1, 1, Conv, [768, 3, 2]], [[-1, 12], 1, Concat, [1]], # cat head P6 [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] ================================================ FILE: module/detect/models/hub/yolov5-p7.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: 3 # AutoAnchor evolves 3 anchors per P output layer # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 [-1, 3, C3, [768]], [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 [-1, 3, C3, [1024]], [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128 [-1, 3, C3, [1280]], [-1, 1, SPPF, [1280, 5]], # 13 ] # YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs head: [[-1, 1, Conv, [1024, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 10], 1, Concat, [1]], # cat backbone P6 [-1, 3, C3, [1024, False]], # 17 [-1, 1, Conv, [768, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 8], 1, Concat, [1]], # cat backbone P5 [-1, 3, C3, [768, False]], # 21 [-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 25 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 29 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 26], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, False]], # 32 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 22], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [768, False]], # 35 (P5/32-large) [-1, 1, Conv, [768, 3, 2]], [[-1, 18], 1, Concat, [1]], # cat head P6 [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge) [-1, 1, Conv, [1024, 3, 2]], [[-1, 14], 1, Concat, [1]], # cat head P7 [-1, 3, C3, [1280, False]], # 
41 (P7/128-xxlarge) [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7) ] ================================================ FILE: module/detect/models/hub/yolov5-panet.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 9 ] # YOLOv5 v6.0 PANet head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 13 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 17 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 14], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, False]], # 20 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 10], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] ================================================ FILE: module/detect/models/hub/yolov5l6.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - [19,27, 44,40, 38,94] # P3/8 - [96,68, 86,152, 180,137] # P4/16 - [140,301, 303,264, 238,542] # P5/32 - [436,615, 739,380, 925,792] # P6/64 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 [-1, 3, C3, [768]], [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 11 ] # YOLOv5 v6.0 head head: [[-1, 1, Conv, [768, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 8], 1, Concat, [1]], # cat backbone P5 [-1, 3, C3, [768, False]], # 15 [-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 19 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 23 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 20], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, False]], # 26 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 16], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [768, False]], # 29 (P5/32-large) [-1, 1, Conv, [768, 3, 2]], [[-1, 12], 1, Concat, [1]], # cat head P6 [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] ================================================ FILE: module/detect/models/hub/yolov5m6.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # 
number of classes depth_multiple: 0.67 # model depth multiple width_multiple: 0.75 # layer channel multiple anchors: - [19,27, 44,40, 38,94] # P3/8 - [96,68, 86,152, 180,137] # P4/16 - [140,301, 303,264, 238,542] # P5/32 - [436,615, 739,380, 925,792] # P6/64 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 [-1, 3, C3, [768]], [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 11 ] # YOLOv5 v6.0 head head: [[-1, 1, Conv, [768, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 8], 1, Concat, [1]], # cat backbone P5 [-1, 3, C3, [768, False]], # 15 [-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 19 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 23 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 20], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, False]], # 26 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 16], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [768, False]], # 29 (P5/32-large) [-1, 1, Conv, [768, 3, 2]], [[-1, 12], 1, Concat, [1]], # cat head P6 [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] ================================================ FILE: module/detect/models/hub/yolov5n6.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.25 # layer channel multiple anchors: - [19,27, 44,40, 38,94] # P3/8 - [96,68, 86,152, 180,137] # P4/16 - [140,301, 303,264, 238,542] # P5/32 - [436,615, 739,380, 925,792] # P6/64 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 [-1, 3, C3, [768]], [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 11 ] # YOLOv5 v6.0 head head: [[-1, 1, Conv, [768, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 8], 1, Concat, [1]], # cat backbone P5 [-1, 3, C3, [768, False]], # 15 [-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 19 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 23 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 20], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, False]], # 26 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 16], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [768, False]], # 29 (P5/32-large) [-1, 1, Conv, [768, 3, 2]], [[-1, 12], 1, Concat, [1]], # cat head P6 [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] ================================================ FILE: module/detect/models/hub/yolov5s-ghost.yaml ================================================ # 
YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3Ghost, [128]], [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3Ghost, [256]], [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3Ghost, [512]], [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32 [-1, 3, C3Ghost, [1024]], [-1, 1, SPPF, [1024, 5]], # 9 ] # YOLOv5 v6.0 head head: [[-1, 1, GhostConv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3Ghost, [512, False]], # 13 [-1, 1, GhostConv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small) [-1, 1, GhostConv, [256, 3, 2]], [[-1, 14], 1, Concat, [1]], # cat head P4 [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium) [-1, 1, GhostConv, [512, 3, 2]], [[-1, 10], 1, Concat, [1]], # cat head P5 [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] ================================================ FILE: module/detect/models/hub/yolov5s-transformer.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 [-1, 3, C3TR, [1024]], # 9 <--- C3TR() Transformer module [-1, 1, SPPF, [1024, 5]], # 9 ] # YOLOv5 v6.0 head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 13 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 17 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 14], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, False]], # 20 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 10], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] ================================================ FILE: module/detect/models/hub/yolov5s6.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple anchors: - [19,27, 44,40, 38,94] # P3/8 - [96,68, 86,152, 180,137] # P4/16 - [140,301, 303,264, 238,542] # P5/32 - [436,615, 739,380, 925,792] # P6/64 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 
2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 [-1, 3, C3, [768]], [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 11 ] # YOLOv5 v6.0 head head: [[-1, 1, Conv, [768, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 8], 1, Concat, [1]], # cat backbone P5 [-1, 3, C3, [768, False]], # 15 [-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 19 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 23 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 20], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, False]], # 26 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 16], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [768, False]], # 29 (P5/32-large) [-1, 1, Conv, [768, 3, 2]], [[-1, 12], 1, Concat, [1]], # cat head P6 [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] ================================================ FILE: module/detect/models/hub/yolov5x6.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 1.33 # model depth multiple width_multiple: 1.25 # layer channel multiple anchors: - [19,27, 44,40, 38,94] # P3/8 - [96,68, 86,152, 180,137] # P4/16 - [140,301, 303,264, 238,542] # P5/32 - [436,615, 739,380, 925,792] # P6/64 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 [-1, 3, C3, [768]], [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 11 ] # YOLOv5 v6.0 head head: [[-1, 1, Conv, [768, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 8], 1, Concat, [1]], # cat backbone P5 [-1, 3, C3, [768, False]], # 15 [-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 19 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 23 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 20], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, False]], # 26 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 16], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [768, False]], # 29 (P5/32-large) [-1, 1, Conv, [768, 3, 2]], [[-1, 12], 1, Concat, [1]], # cat head P6 [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] ================================================ FILE: module/detect/models/tf.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ TensorFlow, Keras and TFLite versions of YOLOv5 Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 Usage: $ python models/tf.py --weights yolov5s.pt Export: $ python path/to/export.py --weights yolov5s.pt --include saved_model pb tflite tfjs """ import argparse import sys from copy import deepcopy from pathlib import Path FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: 
sys.path.append(str(ROOT)) # add ROOT to PATH # ROOT = ROOT.relative_to(Path.cwd()) # relative import numpy as np import tensorflow as tf import torch import torch.nn as nn from tensorflow import keras from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv, DWConvTranspose2d, Focus, autopad) from models.experimental import MixConv2d, attempt_load from models.yolo import Detect from utils.activations import SiLU from utils.general import LOGGER, make_divisible, print_args class TFBN(keras.layers.Layer): # TensorFlow BatchNormalization wrapper def __init__(self, w=None): super().__init__() self.bn = keras.layers.BatchNormalization( beta_initializer=keras.initializers.Constant(w.bias.numpy()), gamma_initializer=keras.initializers.Constant(w.weight.numpy()), moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()), moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()), epsilon=w.eps) def call(self, inputs): return self.bn(inputs) class TFPad(keras.layers.Layer): # Pad inputs in spatial dimensions 1 and 2 def __init__(self, pad): super().__init__() if isinstance(pad, int): self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]) else: # tuple/list self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]]) def call(self, inputs): return tf.pad(inputs, self.pad, mode='constant', constant_values=0) class TFConv(keras.layers.Layer): # Standard convolution def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): # ch_in, ch_out, weights, kernel, stride, padding, groups super().__init__() assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" # TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding) # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch conv = keras.layers.Conv2D( filters=c2, kernel_size=k, strides=s, padding='SAME' if s == 1 else 'VALID', use_bias=not hasattr(w, 'bn'), kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()), bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy())) self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity self.act = activations(w.act) if act else tf.identity def call(self, inputs): return self.act(self.bn(self.conv(inputs))) class TFDWConv(keras.layers.Layer): # Depthwise convolution def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None): # ch_in, ch_out, weights, kernel, stride, padding, groups super().__init__() assert c2 % c1 == 0, f'TFDWConv() output={c2} must be a multiple of input={c1} channels' conv = keras.layers.DepthwiseConv2D( kernel_size=k, depth_multiplier=c2 // c1, strides=s, padding='SAME' if s == 1 else 'VALID', use_bias=not hasattr(w, 'bn'), depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()), bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy())) self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity self.act = activations(w.act) if act else tf.identity def call(self, inputs): return self.act(self.bn(self.conv(inputs))) class TFDWConvTranspose2d(keras.layers.Layer): # Depthwise ConvTranspose2d def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None): # ch_in, ch_out, weights, 
kernel, stride, padding, groups super().__init__() assert c1 == c2, f'TFDWConv() output={c2} must be equal to input={c1} channels' assert k == 4 and p1 == 1, 'TFDWConv() only valid for k=4 and p1=1' weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy() self.c1 = c1 self.conv = [ keras.layers.Conv2DTranspose(filters=1, kernel_size=k, strides=s, padding='VALID', output_padding=p2, use_bias=True, kernel_initializer=keras.initializers.Constant(weight[..., i:i + 1]), bias_initializer=keras.initializers.Constant(bias[i])) for i in range(c1)] def call(self, inputs): return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1] class TFFocus(keras.layers.Layer): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): # ch_in, ch_out, kernel, stride, padding, groups super().__init__() self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv) def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) # inputs = inputs / 255 # normalize 0-255 to 0-1 inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]] return self.conv(tf.concat(inputs, 3)) class TFBottleneck(keras.layers.Layer): # Standard bottleneck def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2) self.add = shortcut and c1 == c2 def call(self, inputs): return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs)) class TFCrossConv(keras.layers.Layer): # Cross Convolution def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None): super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1) self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2) self.add = shortcut and c1 == c2 def call(self, inputs): return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs)) class TFConv2d(keras.layers.Layer): # Substitution for PyTorch nn.Conv2D def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): super().__init__() assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" self.conv = keras.layers.Conv2D(filters=c2, kernel_size=k, strides=s, padding='VALID', use_bias=bias, kernel_initializer=keras.initializers.Constant( w.weight.permute(2, 3, 1, 0).numpy()), bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None) def call(self, inputs): return self.conv(inputs) class TFBottleneckCSP(keras.layers.Layer): # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, number, shortcut, groups, expansion super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2) self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3) self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4) self.bn = TFBN(w.bn) self.act = lambda x: keras.activations.swish(x) self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) def call(self, inputs): y1 = self.cv3(self.m(self.cv1(inputs))) y2 = self.cv2(inputs) return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3)))) class TFC3(keras.layers.Layer): # CSP Bottleneck with 3 convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, 
e=0.5, w=None): # ch_in, ch_out, number, shortcut, groups, expansion super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3) self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) def call(self, inputs): return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)) class TFC3x(keras.layers.Layer): # 3 module with cross-convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, number, shortcut, groups, expansion super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3) self.m = keras.Sequential([ TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)]) def call(self, inputs): return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)) class TFSPP(keras.layers.Layer): # Spatial pyramid pooling layer used in YOLOv3-SPP def __init__(self, c1, c2, k=(5, 9, 13), w=None): super().__init__() c_ = c1 // 2 # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k] def call(self, inputs): x = self.cv1(inputs) return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3)) class TFSPPF(keras.layers.Layer): # Spatial pyramid pooling-Fast layer def __init__(self, c1, c2, k=5, w=None): super().__init__() c_ = c1 // 2 # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2) self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME') def call(self, inputs): x = self.cv1(inputs) y1 = self.m(x) y2 = self.m(y1) return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3)) class TFDetect(keras.layers.Layer): # TF YOLOv5 Detect layer def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer super().__init__() self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32) self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors self.grid = [tf.zeros(1)] * self.nl # init grid self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32) self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2]) self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] self.training = False # set to False after building model self.imgsz = imgsz for i in range(self.nl): ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] self.grid[i] = self._make_grid(nx, ny) def call(self, inputs): z = [] # inference output x = [] for i in range(self.nl): x.append(self.m[i](inputs[i])) # x(bs,20,20,255) to x(bs,3,20,20,85) ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no]) if not self.training: # inference y = tf.sigmoid(x[i]) grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5 anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4 xy = (y[..., 0:2] * 2 + grid) * self.stride[i] # xy wh = y[..., 2:4] ** 2 * anchor_grid # Normalize xywh to 0-1 to reduce calibration error xy /= tf.constant([[self.imgsz[1], 
self.imgsz[0]]], dtype=tf.float32) wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) y = tf.concat([xy, wh, y[..., 4:]], -1) z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), x) @staticmethod def _make_grid(nx=20, ny=20): # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny)) return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32) class TFUpsample(keras.layers.Layer): # TF version of torch.nn.Upsample() def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' super().__init__() assert scale_factor == 2, "scale_factor must be 2" self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) # with default arguments: align_corners=False, half_pixel_centers=False # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, # size=(x.shape[1] * 2, x.shape[2] * 2)) def call(self, inputs): return self.upsample(inputs) class TFConcat(keras.layers.Layer): # TF version of torch.concat() def __init__(self, dimension=1, w=None): super().__init__() assert dimension == 1, "convert only NCHW to NHWC concat" self.d = 3 def call(self, inputs): return tf.concat(inputs, self.d) def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args m_str = m m = eval(m) if isinstance(m, str) else m # eval strings for j, a in enumerate(args): try: args[j] = eval(a) if isinstance(a, str) else a # eval strings except NameError: pass n = max(round(n * gd), 1) if n > 1 else n # depth gain if m in [ nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv, BottleneckCSP, C3, C3x]: c1, c2 = ch[f], args[0] c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 args = [c1, c2, *args[1:]] if m in [BottleneckCSP, C3, C3x]: args.insert(2, n) n = 1 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: c2 = sum(ch[-1 if x == -1 else x + 1] for x in f) elif m is Detect: args.append([ch[x + 1] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) args.append(imgsz) else: c2 = ch[f] tf_m = eval('TF' + m_str.replace('nn.', '')) m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \ else tf_m(*args, w=model.model[i]) # module torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module t = str(m)[8:-2].replace('__main__.', '') # module type np = sum(x.numel() for x in torch_m_.parameters()) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist 
layers.append(m_) ch.append(c2) return keras.Sequential(layers), sorted(save) class TFModel: # TF YOLOv5 model def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes super().__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict else: # is *.yaml import yaml # for torch hub self.yaml_file = Path(cfg).name with open(cfg) as f: self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict # Define model if nc and nc != self.yaml['nc']: LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}") self.yaml['nc'] = nc # override yaml value self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz) def predict(self, inputs, tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, conf_thres=0.25): y = [] # outputs x = inputs for m in self.model.layers: if m.f != -1: # if not from previous layer x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers x = m(x) # run y.append(x if m.i in self.savelist else None) # save output # Add TensorFlow NMS if tf_nms: boxes = self._xywh2xyxy(x[0][..., :4]) probs = x[0][:, :, 4:5] classes = x[0][:, :, 5:] scores = probs * classes if agnostic_nms: nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres) else: boxes = tf.expand_dims(boxes, 2) nms = tf.image.combined_non_max_suppression(boxes, scores, topk_per_class, topk_all, iou_thres, conf_thres, clip_boxes=False) return nms, x[1] return x[0] # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...] # x = x[0][0] # [x(1,6300,85), ...] to x(6300,85) # xywh = x[..., :4] # x(6300,4) boxes # conf = x[..., 4:5] # x(6300,1) confidences # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes # return tf.concat([conf, cls, xywh], 1) @staticmethod def _xywh2xyxy(xywh): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1) return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1) class AgnosticNMS(keras.layers.Layer): # TF Agnostic NMS def call(self, input, topk_all, iou_thres, conf_thres): # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres), input, fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), name='agnostic_nms') @staticmethod def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS boxes, classes, scores = x class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) scores_inp = tf.reduce_max(scores, -1) selected_inds = tf.image.non_max_suppression(boxes, scores_inp, max_output_size=topk_all, iou_threshold=iou_thres, score_threshold=conf_thres) selected_boxes = tf.gather(boxes, selected_inds) padded_boxes = tf.pad(selected_boxes, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], mode="CONSTANT", constant_values=0.0) selected_scores = tf.gather(scores_inp, selected_inds) padded_scores = tf.pad(selected_scores, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], mode="CONSTANT", constant_values=-1.0) selected_classes = tf.gather(class_inds, selected_inds) padded_classes = tf.pad(selected_classes, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], mode="CONSTANT", constant_values=-1.0) valid_detections = tf.shape(selected_inds)[0] return padded_boxes, padded_scores, 
padded_classes, valid_detections def activations(act=nn.SiLU): # Returns TF activation from input PyTorch activation if isinstance(act, nn.LeakyReLU): return lambda x: keras.activations.relu(x, alpha=0.1) elif isinstance(act, nn.Hardswish): return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667 elif isinstance(act, (nn.SiLU, SiLU)): return lambda x: keras.activations.swish(x) else: raise Exception(f'no matching TensorFlow activation found for PyTorch activation {act}') def representative_dataset_gen(dataset, ncalib=100): # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays for n, (path, img, im0s, vid_cap, string) in enumerate(dataset): im = np.transpose(img, [1, 2, 0]) im = np.expand_dims(im, axis=0).astype(np.float32) im /= 255 yield [im] if n >= ncalib: break def run( weights=ROOT / 'yolov5s.pt', # weights path imgsz=(640, 640), # inference size h,w batch_size=1, # batch size dynamic=False, # dynamic batch size ): # PyTorch model im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image model = attempt_load(weights, device=torch.device('cpu'), inplace=True, fuse=False) _ = model(im) # inference model.info() # TensorFlow model im = tf.zeros((batch_size, *imgsz, 3)) # BHWC image tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) _ = tf_model.predict(im) # inference # Keras model im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size) keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im)) keras_model.summary() LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.') def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--dynamic', action='store_true', help='dynamic batch size') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(vars(opt)) return opt def main(opt): run(**vars(opt)) if __name__ == "__main__": opt = parse_opt() main(opt) ================================================ FILE: module/detect/models/yolo.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ YOLO-specific modules Usage: $ python path/to/models/yolo.py --cfg yolov5s.yaml """ import argparse import contextlib import os import platform import sys from copy import deepcopy from pathlib import Path FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH if platform.system() != 'Windows': ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.common import * from models.experimental import * from utils.autoanchor import check_anchor_order from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args from utils.plots import feature_visualization from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device, time_sync) try: import thop # for FLOPs computation except ImportError: thop = None class Detect(nn.Module): stride = None # strides computed during build onnx_dynamic = False # ONNX export parameter export = False # export mode def __init__(self, nc=80, anchors=(), ch=(), 
inplace=True): # detection layer super().__init__() self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors self.grid = [torch.zeros(1)] * self.nl # init grid self.anchor_grid = [torch.zeros(1)] * self.nl # init anchor grid self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.inplace = inplace # use in-place ops (e.g. slice assignment) def forward(self, x): z = [] # inference output for i in range(self.nl): x[i] = self.m[i](x[i]) # conv bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() if not self.training: # inference if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) y = x[i].sigmoid() if self.inplace: y[..., 0:2] = (y[..., 0:2] * 2 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, -1, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) def _make_grid(self, nx=20, ny=20, i=0): d = self.anchors[i].device t = self.anchors[i].dtype shape = 1, self.na, ny, nx, 2 # grid shape y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) if check_version(torch.__version__, '1.10.0'): # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility yv, xv = torch.meshgrid(y, x, indexing='ij') else: yv, xv = torch.meshgrid(y, x) grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. 
y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid class Model(nn.Module): # YOLOv5 model def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes super().__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict else: # is *.yaml import yaml # for torch hub self.yaml_file = Path(cfg).name with open(cfg, encoding='ascii', errors='ignore') as f: self.yaml = yaml.safe_load(f) # model dict # Define model ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels if nc and nc != self.yaml['nc']: LOGGER.info(f"overriding model.yaml nc={self.yaml['nc']} with nc={nc}") self.yaml['nc'] = nc # override yaml value if anchors: LOGGER.info(f'overriding model.yaml anchors with anchors={anchors}') self.yaml['anchors'] = round(anchors) # override yaml value self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist self.names = [str(i) for i in range(self.yaml['nc'])] # default names self.inplace = self.yaml.get('inplace', True) # Build strides, anchors m = self.model[-1] # Detect() if isinstance(m, Detect): s = 256 # 2x min stride m.inplace = self.inplace m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward check_anchor_order(m) # must be in pixel-space (not grid-space) m.anchors /= m.stride.view(-1, 1, 1) self.stride = m.stride self._initialize_biases() # only run once # Init weights, biases initialize_weights(self) self.info() LOGGER.debug('') def forward(self, x, augment=False, profile=False, visualize=False): if augment: return self._forward_augment(x) # augmented inference, None return self._forward_once(x, profile, visualize) # single-scale inference, train def _forward_augment(self, x): img_size = x.shape[-2:] # height, width s = [1, 0.83, 0.67] # scales f = [None, 3, None] # flips (2-ud, 3-lr) y = [] # outputs for si, fi in zip(s, f): xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) yi = self._forward_once(xi)[0] # forward # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save yi = self._descale_pred(yi, fi, si, img_size) y.append(yi) y = self._clip_augmented(y) # clip augmented tails return torch.cat(y, 1), None # augmented inference, train def _forward_once(self, x, profile=False, visualize=False): y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers if profile: self._profile_one_layer(m, x, dt) x = m(x) # run y.append(x if m.i in self.save else None) # save output if visualize: feature_visualization(x, m.type, m.i, save_dir=visualize) return x def _descale_pred(self, p, flips, scale, img_size): # de-scale predictions following augmented inference (inverse operation) if self.inplace: p[..., :4] /= scale # de-scale if flips == 2: p[..., 1] = img_size[0] - p[..., 1] # de-flip ud elif flips == 3: p[..., 0] = img_size[1] - p[..., 0] # de-flip lr else: x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale if flips == 2: y = img_size[0] - y # de-flip ud elif flips == 3: x = img_size[1] - x # de-flip lr p = torch.cat((x, y, wh, p[..., 4:]), -1) return p def _clip_augmented(self, y): # Clip YOLOv5 augmented inference tails nl = self.model[-1].nl # number of detection layers (P3-P5) g = sum(4 ** x for x in range(nl)) # grid points e = 1 # exclude layer 
count i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices y[0] = y[0][:, :-i] # large i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices y[-1] = y[-1][:, i:] # small return y def _profile_one_layer(self, m, x, dt): c = isinstance(m, Detect) # is final layer, copy input as inplace fix o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs t = time_sync() for _ in range(10): m(x.copy() if c else x) dt.append((time_sync() - t) * 100) if m == self.model[0]: LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module") LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') if c: LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency # https://arxiv.org/abs/1708.02002 section 3.3 # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. m = self.model[-1] # Detect() module for mi, s in zip(m.m, m.stride): # from b = mi.bias.view(m.na, -1).detach() # conv.bias(255) to (3,85) b[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) b[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) def _print_biases(self): m = self.model[-1] # Detect() module for mi in m.m: # from b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) LOGGER.info( ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()) ) # def _print_weights(self): # for m in self.model.modules(): # if type(m) is Bottleneck: # LOGGER.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers LOGGER.info('fuse yolo layers (conv & bn)') for m in self.model.modules(): if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv delattr(m, 'bn') # remove batchnorm m.forward = m.forward_fuse # update forward self.info() return self def info(self, verbose=False, img_size=640): # print model information model_info(self, verbose, img_size) def _apply(self, fn): # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers self = super()._apply(fn) m = self.model[-1] # Detect() if isinstance(m, Detect): m.stride = fn(m.stride) m.grid = list(map(fn, m.grid)) if isinstance(m.anchor_grid, list): m.anchor_grid = list(map(fn, m.anchor_grid)) return self def parse_model(d, ch): # model_dict, input_channels(3) LOGGER.debug(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args m = eval(m) if isinstance(m, str) else m # eval strings for j, a in enumerate(args): with contextlib.suppress(NameError): args[j] = eval(a) if isinstance(a, str) else a # eval strings n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain if m in (Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, 
DWConvTranspose2d, C3x): c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) args = [c1, c2, *args[1:]] if m in [BottleneckCSP, C3, C3TR, C3Ghost, C3x]: args.insert(2, n) # number of repeats n = 1 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: c2 = sum(ch[x] for x in f) elif m is Detect: args.append([ch[x] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) elif m is Contract: c2 = ch[f] * args[0] ** 2 elif m is Expand: c2 = ch[f] // args[0] ** 2 else: c2 = ch[f] m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module t = str(m)[8:-2].replace('__main__.', '') # module type np = sum(x.numel() for x in m_.parameters()) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params LOGGER.debug(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) if i == 0: ch = [] ch.append(c2) return nn.Sequential(*layers), sorted(save) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--profile', action='store_true', help='profile model speed') parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer') parser.add_argument('--test', action='store_true', help='test all yolo*.yaml') opt = parser.parse_args() opt.cfg = check_yaml(opt.cfg) # check YAML print_args(vars(opt)) device = select_device(opt.device) # Create model im = torch.rand(opt.batch_size, 3, 640, 640).to(device) model = Model(opt.cfg).to(device) # Options if opt.line_profile: # profile layer by layer _ = model(im, profile=True) elif opt.profile: # profile forward-backward results = profile(input=im, ops=[model], n=3) elif opt.test: # test all models for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'): try: _ = Model(cfg) except Exception as e: print(f'Error in {cfg}: {e}') else: # report fused model summary model.fuse() ================================================ FILE: module/detect/models/yolov5l.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 9 ] # YOLOv5 v6.0 head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 13 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 17 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 14], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, 
False]], # 20 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 10], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] ================================================ FILE: module/detect/models/yolov5m.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 0.67 # model depth multiple width_multiple: 0.75 # layer channel multiple anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 9 ] # YOLOv5 v6.0 head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 13 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 17 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 14], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, False]], # 20 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 10], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] ================================================ FILE: module/detect/models/yolov5n.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.25 # layer channel multiple anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 9 ] # YOLOv5 v6.0 head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 13 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 17 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 14], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, False]], # 20 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 10], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] ================================================ FILE: module/detect/models/yolov5s.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, 
Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 9 ] # YOLOv5 v6.0 head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 13 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 17 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 14], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, False]], # 20 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 10], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] ================================================ FILE: module/detect/models/yolov5x.yaml ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Parameters nc: 80 # number of classes depth_multiple: 1.33 # model depth multiple width_multiple: 1.25 # layer channel multiple anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 [-1, 3, C3, [1024]], [-1, 1, SPPF, [1024, 5]], # 9 ] # YOLOv5 v6.0 head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 3, C3, [512, False]], # 13 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 3, C3, [256, False]], # 17 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 14], 1, Concat, [1]], # cat head P4 [-1, 3, C3, [512, False]], # 20 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 10], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] ================================================ FILE: module/detect/requirements.txt ================================================ # YOLOv5 requirements # Usage: pip install -r requirements.txt # Base ---------------------------------------- matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.1 Pillow>=7.1.2 PyYAML>=5.3.1 requests>=2.23.0 scipy>=1.4.1 torch>=1.7.0 torchvision>=0.8.1 tqdm>=4.64.0 protobuf<4.21.3 # https://github.com/ultralytics/yolov5/issues/8012 # Logging ------------------------------------- tensorboard>=2.4.1 # wandb # Plotting ------------------------------------ pandas>=1.1.4 seaborn>=0.11.0 # Export -------------------------------------- # coremltools>=4.1 # CoreML export # onnx>=1.9.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export # scikit-learn==0.19.2 # CoreML quantization # tensorflow>=2.4.1 # TFLite export # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export # Extras -------------------------------------- ipython # interactive notebook psutil # system utilization thop>=0.1.1 # FLOPs computation # albumentations>=1.0.3 # pycocotools>=2.0 # COCO mAP # 
roboflow ================================================ FILE: module/detect/utils/__init__.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ utils/initialization """ def notebook_init(verbose=True): # Check system software and hardware print('Checking setup...') import os import shutil from utils.general import check_requirements, emojis, is_colab from utils.torch_utils import select_device # imports check_requirements(('psutil', 'IPython')) import psutil from IPython import display # to display images and clear console output if is_colab(): shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory # System info if verbose: gb = 1 << 30 # bytes to GiB (1024 ** 3) ram = psutil.virtual_memory().total total, used, free = shutil.disk_usage("/") display.clear_output() s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' else: s = '' select_device(newline=False) print(emojis(f'Setup complete ✅ {s}')) return display ================================================ FILE: module/detect/utils/activations.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Activation functions """ import torch import torch.nn as nn import torch.nn.functional as F class SiLU(nn.Module): # SiLU activation https://arxiv.org/pdf/1606.08415.pdf @staticmethod def forward(x): return x * torch.sigmoid(x) class Hardswish(nn.Module): # Hard-SiLU activation @staticmethod def forward(x): # return x * F.hardsigmoid(x) # for TorchScript and CoreML return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX class Mish(nn.Module): # Mish activation https://github.com/digantamisra98/Mish @staticmethod def forward(x): return x * F.softplus(x).tanh() class MemoryEfficientMish(nn.Module): # Mish activation memory-efficient class F(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] sx = torch.sigmoid(x) fx = F.softplus(x).tanh() return grad_output * (fx + x * sx * (1 - fx * fx)) def forward(self, x): return self.F.apply(x) class FReLU(nn.Module): # FReLU activation https://arxiv.org/abs/2007.11824 def __init__(self, c1, k=3): # ch_in, kernel super().__init__() self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) self.bn = nn.BatchNorm2d(c1) def forward(self, x): return torch.max(x, self.bn(self.conv(x))) class AconC(nn.Module): r""" ACON activation (activate or not) AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter according to "Activate or Not: Learning Customized Activation" . """ def __init__(self, c1): super().__init__() self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) def forward(self, x): dpx = (self.p1 - self.p2) * x return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x class MetaAconC(nn.Module): r""" ACON activation (activate or not) MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network according to "Activate or Not: Learning Customized Activation" . 
""" def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r super().__init__() c2 = max(r, c1 // r) self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) # self.bn1 = nn.BatchNorm2d(c2) # self.bn2 = nn.BatchNorm2d(c1) def forward(self, x): y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed dpx = (self.p1 - self.p2) * x return dpx * torch.sigmoid(beta * dpx) + self.p2 * x ================================================ FILE: module/detect/utils/augmentations.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Image augmentation functions """ import math import random import cv2 import numpy as np from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box from utils.metrics import bbox_ioa class Albumentations: # YOLOv5 Albumentations class (optional, only used if package is installed) def __init__(self): self.transform = None try: import albumentations as A check_version(A.__version__, '1.0.3', hard=True) # version requirement T = [ A.Blur(p=0.01), A.MedianBlur(p=0.01), A.ToGray(p=0.01), A.CLAHE(p=0.01), A.RandomBrightnessContrast(p=0.0), A.RandomGamma(p=0.0), A.ImageCompression(quality_lower=75, p=0.0)] # transforms self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) except ImportError: # package not installed, skip pass except Exception as e: LOGGER.info(colorstr('albumentations: ') + f'{e}') def __call__(self, im, labels, p=1.0): if self.transform and random.random() < p: new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) return im, labels def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): # HSV color-space augmentation if hgain or sgain or vgain: r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) dtype = im.dtype # uint8 x = np.arange(0, 256, dtype=r.dtype) lut_hue = ((x * r[0]) % 180).astype(dtype) lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) lut_val = np.clip(x * r[2], 0, 255).astype(dtype) im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed def hist_equalize(im, clahe=True, bgr=False): # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) if clahe: c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) yuv[:, :, 0] = c.apply(yuv[:, :, 0]) else: yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB def replicate(im, labels): # Replicate labels h, w = im.shape[:2] boxes = labels[:, 1:].astype(int) x1, y1, x2, y2 = boxes.T s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) for i in s.argsort()[:round(s.size * 0.5)]: # 
smallest indices x1b, y1b, x2b, y2b = boxes[i] bh, bw = y2b - y1b, x2b - x1b yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) return im, labels def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): # Resize and pad image while meeting stride-multiple constraints shape = im.shape[:2] # current shape [height, width] if isinstance(new_shape, int): new_shape = (new_shape, new_shape) # Scale ratio (new / old) r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) if not scaleup: # only scale down, do not scale up (for better eval mAP) r = min(r, 1.0) # Compute padding ratio = r, r # width, height ratios new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding if auto: # minimum rectangle dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding elif scaleFill: # stretch dw, dh = 0.0, 0.0 new_unpad = (new_shape[1], new_shape[0]) ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios dw /= 2 # divide padding into 2 sides dh /= 2 if shape[::-1] != new_unpad: # resize im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border return im, ratio, (dw, dh) def random_perspective( im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0) ): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] height = im.shape[0] + border[0] * 2 # shape(h,w,c) width = im.shape[1] + border[1] * 2 # Center C = np.eye(3) C[0, 2] = -im.shape[1] / 2 # x translation (pixels) C[1, 2] = -im.shape[0] / 2 # y translation (pixels) # Perspective P = np.eye(3) P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) # Rotation and Scale R = np.eye(3) a = random.uniform(-degrees, degrees) # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations s = random.uniform(1 - scale, 1 + scale) # s = 2 ** random.uniform(-scale, scale) R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) # Shear S = np.eye(3) S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) # Translation T = np.eye(3) T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) # Combined rotation matrix M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed if perspective: im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) else: # affine im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) # Visualize # import matplotlib.pyplot as plt # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() # 
ax[0].imshow(im[:, :, ::-1]) # base # ax[1].imshow(im2[:, :, ::-1]) # warped # Transform label coordinates n = len(targets) if n: use_segments = any(x.any() for x in segments) new = np.zeros((n, 4)) if use_segments: # warp segments segments = resample_segments(segments) # upsample for i, segment in enumerate(segments): xy = np.ones((len(segment), 3)) xy[:, :2] = segment xy = xy @ M.T # transform xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine # clip new[i] = segment2box(xy, width, height) else: # warp boxes xy = np.ones((n * 4, 3)) xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 xy = xy @ M.T # transform xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine # create new boxes x = xy[:, [0, 2, 4, 6]] y = xy[:, [1, 3, 5, 7]] new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T # clip new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) # filter candidates i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) targets = targets[i] targets[:, 1:5] = new[i] return im, targets def copy_paste(im, labels, segments, p=0.5): # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) n = len(segments) if p and n: h, w, c = im.shape # height, width, channels im_new = np.zeros(im.shape, np.uint8) for j in random.sample(range(n), k=round(p * n)): l, s = labels[j], segments[j] box = w - l[3], l[2], w - l[1], l[4] ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area if (ioa < 0.30).all(): # allow 30% obscuration of existing labels labels = np.concatenate((labels, [[l[0], *box]]), 0) segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) result = cv2.bitwise_and(src1=im, src2=im_new) result = cv2.flip(result, 1) # augment segments (flip left-right) i = result > 0 # pixels to replace # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug return im, labels, segments def cutout(im, labels, p=0.5): # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 if random.random() < p: h, w = im.shape[:2] scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction for s in scales: mask_h = random.randint(1, int(h * s)) # create random masks mask_w = random.randint(1, int(w * s)) # box xmin = max(0, random.randint(0, w) - mask_w // 2) ymin = max(0, random.randint(0, h) - mask_h // 2) xmax = min(w, xmin + mask_w) ymax = min(h, ymin + mask_h) # apply random color mask im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] # return unobscured labels if len(labels) and s > 0.03: box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area labels = labels[ioa < 0.60] # remove >60% obscured labels return labels def mixup(im, labels, im2, labels2): # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 im = (im * r + im2 * (1 - r)).astype(np.uint8) labels = np.concatenate((labels, labels2), 0) return im, labels def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) # Compute candidate boxes: box1 before augment, box2 after augment, 
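# --- Added illustrative sketch; not part of the original file. It shows the mixup() blend
# above on two dummy images with assumed labels: a Beta(32, 32) ratio close to 0.5 mixes the
# pixels, and the two label arrays are simply concatenated. ---
import numpy as np

rng = np.random.default_rng(0)
im1_example = rng.integers(0, 256, (64, 64, 3), dtype=np.uint8)       # dummy image 1
im2_example = rng.integers(0, 256, (64, 64, 3), dtype=np.uint8)       # dummy image 2
labels1_example = np.array([[0, 10, 10, 30, 30]], dtype=np.float32)   # rows: [cls, x1, y1, x2, y2]
labels2_example = np.array([[1, 20, 20, 40, 40]], dtype=np.float32)
r_mix = np.random.beta(32.0, 32.0)                                    # mixup ratio, sharply peaked around 0.5
im_mixed = (im1_example * r_mix + im2_example * (1 - r_mix)).astype(np.uint8)  # pixel-wise blend
labels_mixed = np.concatenate((labels1_example, labels2_example), 0)  # keep boxes from both images
print(round(float(r_mix), 3), im_mixed.shape, labels_mixed.shape)     # e.g. ~0.5 (64, 64, 3) (2, 5)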
wh_thr (pixels), aspect_ratio_thr, area_ratio w1, h1 = box1[2] - box1[0], box1[3] - box1[1] w2, h2 = box2[2] - box2[0], box2[3] - box2[1] ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates ================================================ FILE: module/detect/utils/autoanchor.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ AutoAnchor utils """ import random import numpy as np import torch import yaml from tqdm import tqdm from utils.general import LOGGER, colorstr, emojis PREFIX = colorstr('AutoAnchor: ') def check_anchor_order(m): # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer da = a[-1] - a[0] # delta a ds = m.stride[-1] - m.stride[0] # delta s if da and (da.sign() != ds.sign()): # same order LOGGER.info(f'{PREFIX}Reversing anchor order') m.anchors[:] = m.anchors.flip(0) def check_anchors(dataset, model, thr=4.0, imgsz=640): # Check anchor fit to data, recompute if necessary m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh def metric(k): # compute metric r = wh[:, None] / k[None] x = torch.min(r, 1 / r).min(2)[0] # ratio metric best = x.max(1)[0] # best_x aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold bpr = (best > 1 / thr).float().mean() # best possible recall return bpr, aat stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides anchors = m.anchors.clone() * stride # current anchors bpr, aat = metric(anchors.cpu().view(-1, 2)) s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). 
' if bpr > 0.98: # threshold to recompute LOGGER.info(emojis(f'{s}Current anchors are a good fit to dataset ✅')) else: LOGGER.info(emojis(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...')) na = m.anchors.numel() // 2 # number of anchors try: anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) except Exception as e: LOGGER.info(f'{PREFIX}ERROR: {e}') new_bpr = metric(anchors)[0] if new_bpr > bpr: # replace anchors anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) m.anchors[:] = anchors.clone().view_as(m.anchors) check_anchor_order(m) # must be in pixel-space (not grid-space) m.anchors /= stride s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' else: s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' LOGGER.info(emojis(s)) def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): """ Creates kmeans-evolved anchors from training dataset Arguments: dataset: path to data.yaml, or a loaded dataset n: number of anchors img_size: image size used for training thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 gen: generations to evolve anchors using genetic algorithm verbose: print all results Return: k: kmeans evolved anchors Usage: from utils.autoanchor import *; _ = kmean_anchors() """ from scipy.cluster.vq import kmeans npr = np.random thr = 1 / thr def metric(k, wh): # compute metrics r = wh[:, None] / k[None] x = torch.min(r, 1 / r).min(2)[0] # ratio metric # x = wh_iou(wh, torch.tensor(k)) # iou metric return x, x.max(1)[0] # x, best_x def anchor_fitness(k): # mutation fitness _, best = metric(torch.tensor(k, dtype=torch.float32), wh) return (best * (best > thr).float()).mean() # fitness def print_results(k, verbose=True): k = k[np.argsort(k.prod(1))] # sort small to large x, best = metric(k, wh0) bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ f'past_thr={x[x > thr].mean():.3f}-mean: ' for x in k: s += '%i,%i, ' % (round(x[0]), round(x[1])) if verbose: LOGGER.info(s[:-2]) return k if isinstance(dataset, str): # *.yaml file with open(dataset, errors='ignore') as f: data_dict = yaml.safe_load(f) # model dict from utils.dataloaders import LoadImagesAndLabels dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) # Get label wh shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh # Filter i = (wh0 < 3.0).any(1).sum() if i: LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size') wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 # Kmeans init try: LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') assert n <= len(wh) # apply overdetermined constraint s = wh.std(0) # sigmas for whitening k = kmeans(wh / s, n, iter=30)[0] * s # points assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar except Exception: LOGGER.warning(f'{PREFIX}WARNING: switching strategies from 
kmeans to random init') k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) k = print_results(k, verbose=False) # Plot # k, d = [None] * 20, [None] * 20 # for i in tqdm(range(1, 21)): # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) # ax = ax.ravel() # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh # ax[0].hist(wh[wh[:, 0]<100, 0],400) # ax[1].hist(wh[wh[:, 1]<100, 1],400) # fig.savefig('wh.png', dpi=200) # Evolve f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for _ in pbar: v = np.ones(sh) while (v == 1).all(): # mutate until a change occurs (prevent duplicates) v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) kg = (k.copy() * v).clip(min=2.0) fg = anchor_fitness(kg) if fg > f: f, k = fg, kg.copy() pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' if verbose: print_results(k, verbose) return print_results(k) ================================================ FILE: module/detect/utils/autobatch.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Auto-batch utils """ from copy import deepcopy import numpy as np import torch from utils.general import LOGGER, colorstr, emojis from utils.torch_utils import profile def check_train_batch_size(model, imgsz=640, amp=True): # Check YOLOv5 training batch size with torch.cuda.amp.autocast(amp): return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): # Automatically estimate best batch size to use `fraction` of available CUDA memory # Usage: # import torch # from utils.autobatch import autobatch # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) # print(autobatch(model)) # Check device prefix = colorstr('AutoBatch: ') LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') device = next(model.parameters()).device # get model device if device.type == 'cpu': LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') return batch_size # Inspect CUDA memory gb = 1 << 30 # bytes to GiB (1024 ** 3) d = str(device).upper() # 'CUDA:0' properties = torch.cuda.get_device_properties(device) # device properties t = properties.total_memory / gb # GiB total r = torch.cuda.memory_reserved(device) / gb # GiB reserved a = torch.cuda.memory_allocated(device) / gb # GiB allocated f = t - (r + a) # GiB free LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') # Profile batch sizes batch_sizes = [1, 2, 4, 8, 16] try: img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes] results = profile(img, model, n=3, device=device) except Exception as e: LOGGER.warning(f'{prefix}{e}') # Fit a solution y = [x[2] for x in results if x] # memory [2] p = np.polyfit(batch_sizes[:len(y)], y, deg=1) # first degree polynomial fit b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) if None in results: # some sizes failed i = results.index(None) # first fail index if b >= batch_sizes[i]: # y intercept above failure point b = batch_sizes[max(i - 1, 0)] # select prior safe point 
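# --- Added illustrative sketch; not part of the original file, and the numbers are assumed
# rather than real profiling output. autobatch() above fits a first-degree polynomial to the
# measured memory of a few trial batch sizes and solves it for the batch size that would use
# `fraction` of the free CUDA memory. ---
import numpy as np

sizes_example = [1, 2, 4, 8, 16]
mem_example = [1.2, 1.9, 3.3, 6.1, 11.8]                   # hypothetical GiB used at each trial size
free_example, fraction_example = 14.0, 0.9                 # hypothetical free GiB and target utilisation
p_example = np.polyfit(sizes_example, mem_example, deg=1)  # p[0] = GiB per image, p[1] = fixed overhead
b_example = int((free_example * fraction_example - p_example[1]) / p_example[0])
print(b_example)                                           # roughly 17 with these assumed numbers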
fraction = np.polyval(p, b) / t # actual fraction predicted LOGGER.info(emojis(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')) return b ================================================ FILE: module/detect/utils/aws/__init__.py ================================================ ================================================ FILE: module/detect/utils/aws/mime.sh ================================================ # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ # This script will run on every instance restart, not only on first start # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- Content-Type: multipart/mixed; boundary="//" MIME-Version: 1.0 --// Content-Type: text/cloud-config; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Content-Disposition: attachment; filename="cloud-config.txt" #cloud-config cloud_final_modules: - [scripts-user, always] --// Content-Type: text/x-shellscript; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Content-Disposition: attachment; filename="userdata.txt" #!/bin/bash # --- paste contents of userdata.sh here --- --// ================================================ FILE: module/detect/utils/aws/resume.py ================================================ # Resume all interrupted trainings in yolov5/ dir including DDP trainings # Usage: $ python utils/aws/resume.py import os import sys from pathlib import Path import torch import yaml FILE = Path(__file__).resolve() ROOT = FILE.parents[2] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH port = 0 # --master_port path = Path('').resolve() for last in path.rglob('*/**/last.pt'): ckpt = torch.load(last) if ckpt['optimizer'] is None: continue # Load opt.yaml with open(last.parent.parent / 'opt.yaml', errors='ignore') as f: opt = yaml.safe_load(f) # Get device count d = opt['device'].split(',') # devices nd = len(d) # number of devices ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel if ddp: # multi-GPU port += 1 cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} trainfd.py --resume {last}' else: # single-GPU cmd = f'python trainfd.py --resume {last}' cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread print(cmd) os.system(cmd) ================================================ FILE: module/detect/utils/aws/userdata.sh ================================================ #!/bin/bash # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html # This script will run only once on first instance start (for a re-start script see mime.sh) # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir # Use >300 GB SSD cd home/ubuntu if [ ! -d yolov5 ]; then echo "Running first-time script." # install dependencies, download COCO, pull Docker git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 cd yolov5 bash data/scripts/get_coco.sh && echo "COCO done." & sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & wait && echo "All tasks done." # finish background tasks else echo "Running re-start script." # resume interrupted runs i=0 list=$(sudo docker ps -qa) # container list i.e. 
$'one\ntwo\nthree\nfour' while IFS= read -r id; do ((i++)) echo "restarting container $i: $id" sudo docker start $id # sudo docker exec -it $id python train.py --resume # single-GPU sudo docker exec -d $id python utils/aws/resume.py # multi-scenario done <<<"$list" fi ================================================ FILE: module/detect/utils/benchmarks.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run YOLOv5 benchmarks on all supported export formats Format | `export.py --include` | Model --- | --- | --- PyTorch | - | yolov5s.pt TorchScript | `torchscript` | yolov5s.torchscript ONNX | `onnx` | yolov5s.onnx OpenVINO | `openvino` | yolov5s_openvino_model/ TensorRT | `engine` | yolov5s.engine CoreML | `coreml` | yolov5s.mlmodel TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ TensorFlow GraphDef | `pb` | yolov5s.pb TensorFlow Lite | `tflite` | yolov5s.tflite TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite TensorFlow.js | `tfjs` | yolov5s_web_model/ Requirements: $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT Usage: $ python utils/benchmarks.py --weights yolov5s.pt --img 640 """ import argparse import platform import sys import time from pathlib import Path import pandas as pd FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH # ROOT = ROOT.relative_to(Path.cwd()) # relative import export import val from utils import notebook_init from utils.general import LOGGER, check_yaml, file_size, print_args from utils.torch_utils import select_device def run( weights=ROOT / 'yolov5s.pt', # weights path imgsz=640, # inference size (pixels) batch_size=1, # batch size data=ROOT / 'data/coco128.yaml', # dataset.yaml path device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu half=False, # use FP16 half-precision inference test=False, # test exports only pt_only=False, # test PyTorch only hard_fail=False, # throw error on benchmark failure ): y, t = [], time.time() device = select_device(device) for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) try: assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML if 'cpu' in device.type: assert cpu, 'inference not supported on CPU' if 'cuda' in device.type: assert gpu, 'inference not supported on GPU' # Export if f == '-': w = weights # PyTorch format else: w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others assert suffix in str(w), 'export failed' # Validate result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls)) speeds = result[2] # times (preprocess, inference, postprocess) y.append([name, round(file_size(w), 1), round(metrics[3], 4), round(speeds[1], 2)]) # MB, mAP, t_inference except Exception as e: if hard_fail: assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}') y.append([name, None, None, None]) # mAP, t_inference if pt_only and i == 0: break # break after PyTorch # Print results LOGGER.info('\n') parse_opt() notebook_init() # print system info c = ['Format', 'Size (MB)', 'mAP@0.5:0.95', 'Inference time (ms)'] if map else ['Format', 'Export', '', ''] py = pd.DataFrame(y, columns=c) LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)') LOGGER.info(str(py if map else py.iloc[:, :2])) return py def test( weights=ROOT / 'yolov5s.pt', # weights path imgsz=640, # inference size (pixels) batch_size=1, # batch size data=ROOT / 'data/coco128.yaml', # dataset.yaml path device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu half=False, # use FP16 half-precision inference test=False, # test exports only pt_only=False, # test PyTorch only hard_fail=False, # throw error on benchmark failure ): y, t = [], time.time() device = select_device(device) for i, (name, f, suffix, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable) try: w = weights if f == '-' else \ export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # weights assert suffix in str(w), 'export failed' y.append([name, True]) except Exception: y.append([name, False]) # mAP, t_inference # Print results LOGGER.info('\n') parse_opt() notebook_init() # print system info py = pd.DataFrame(y, columns=['Format', 'Export']) LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)') LOGGER.info(str(py)) return py def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--test', action='store_true', help='test exports only') parser.add_argument('--pt-only', action='store_true', help='test PyTorch only') parser.add_argument('--hard-fail', action='store_true', help='throw error on benchmark failure') opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML print_args(vars(opt)) return opt def main(opt): test(**vars(opt)) if opt.test else run(**vars(opt)) if __name__ == "__main__": opt = parse_opt() main(opt) ================================================ FILE: module/detect/utils/callbacks.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Callback utils """ class Callbacks: """" Handles all registered callbacks for YOLOv5 Hooks """ def __init__(self): # Define the available callbacks self._callbacks = { 'on_pretrain_routine_start': [], 'on_pretrain_routine_end': [], 'on_train_start': [], 'on_train_epoch_start': [], 'on_train_batch_start': [], 'optimizer_step': [], 'on_before_zero_grad': [], 'on_train_batch_end': [], 'on_train_epoch_end': [], 'on_val_start': [], 'on_val_batch_start': [], 'on_val_image_end': [], 'on_val_batch_end': [], 'on_val_end': [], 'on_fit_epoch_end': [], # fit = train + eval 'on_model_save': [], 'on_train_end': [], 'on_params_update': [], 'teardown': [], } self.stop_training = False # set True to interrupt training def register_action(self, hook, name='', callback=None): """ Register a new action to a callback hook Args: hook: The callback hook name to register the action to name: The name of the action for later reference callback: The callback to fire """ assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" assert callable(callback), f"callback '{callback}' is not callable" self._callbacks[hook].append({'name': name, 'callback': callback}) def get_registered_actions(self, hook=None): """" Returns all the registered actions by callback hook Args: hook: The name of the hook to check, defaults to all """ return self._callbacks[hook] if hook else self._callbacks def run(self, hook, *args, **kwargs): """ Loop through the registered actions and fire all callbacks Args: hook: The name of the hook to check, defaults to all args: Arguments to receive from YOLOv5 kwargs: Keyword Arguments to receive from YOLOv5 """ assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" for logger in self._callbacks[hook]: logger['callback'](*args, **kwargs) ================================================ FILE: module/detect/utils/dataloaders.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Dataloaders and dataset utils """ import glob import hashlib import json import math import os import random import shutil import time from itertools import repeat from multiprocessing.pool import Pool, ThreadPool from pathlib import Path from threading import Thread from urllib.parse import urlparse from zipfile import ZipFile import numpy as np import torch import torch.nn.functional as F import yaml from PIL import ExifTags, Image, ImageOps from torch.utils.data import DataLoader, Dataset, dataloader, distributed from tqdm import tqdm from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, 
clean_str, cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): if ExifTags.TAGS[orientation] == 'Orientation': break def get_hash(paths): # Returns a single hash value of a list of paths (files or dirs) size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes h = hashlib.md5(str(size).encode()) # hash sizes h.update(''.join(paths).encode()) # hash paths return h.hexdigest() # return hash def exif_size(img): # Returns exif-corrected PIL size s = img.size # (width, height) try: rotation = dict(img._getexif().items())[orientation] if rotation in [6, 8]: # rotation 270 or 90 s = (s[1], s[0]) except Exception: pass return s def exif_transpose(image): """ Transpose a PIL image accordingly if it has an EXIF Orientation tag. Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() :param image: The image to transpose. :return: An image. """ exif = image.getexif() orientation = exif.get(0x0112, 1) # default 1 if orientation > 1: method = { 2: Image.FLIP_LEFT_RIGHT, 3: Image.ROTATE_180, 4: Image.FLIP_TOP_BOTTOM, 5: Image.TRANSPOSE, 6: Image.ROTATE_270, 7: Image.TRANSVERSE, 8: Image.ROTATE_90, }.get(orientation) if method is not None: image = image.transpose(method) del exif[0x0112] image.info["exif"] = exif.tobytes() return image def seed_worker(worker_id): # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader worker_seed = torch.initial_seed() % 2 ** 32 np.random.seed(worker_seed) random.seed(worker_seed) def create_dataloader( path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix='', shuffle=False ): if rect and shuffle: LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = LoadImagesAndLabels( path, imgsz, batch_size, augment=augment, # augmentation hyp=hyp, # hyperparameters rect=rect, # rectangular batches cache_images=cache, single_cls=single_cls, stride=int(stride), pad=pad, image_weights=image_weights, prefix=prefix ) batch_size = min(batch_size, len(dataset)) nd = torch.cuda.device_count() # number of CUDA devices nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() generator.manual_seed(0) return loader( dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, num_workers=nw, sampler=sampler, pin_memory=True, collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, 
worker_init_fn=seed_worker, generator=generator ), dataset class InfiniteDataLoader(dataloader.DataLoader): """ Dataloader that reuses workers Uses same syntax as vanilla DataLoader """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) self.iterator = super().__iter__() def __len__(self): return len(self.batch_sampler.sampler) def __iter__(self): for _ in range(len(self)): yield next(self.iterator) class _RepeatSampler: """ Sampler that repeats forever Args: sampler (Sampler) """ def __init__(self, sampler): self.sampler = sampler def __iter__(self): while True: yield from iter(self.sampler) class LoadImages: # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True): files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: p = str(Path(p).resolve()) if '*' in p: files.extend(sorted(glob.glob(p, recursive=True))) # glob elif os.path.isdir(p): files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir elif os.path.isfile(p): files.append(p) # files else: raise FileNotFoundError(f'{p} does not exist') images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] ni, nv = len(images), len(videos) self.img_size = img_size self.stride = stride self.files = images + videos self.nf = ni + nv # number of files self.video_flag = [False] * ni + [True] * nv self.mode = 'image' self.auto = auto if any(videos): self.new_video(videos[0]) # new video else: self.cap = None assert self.nf > 0, f'No images or videos found in {p}. ' \ f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' def __iter__(self): self.count = 0 return self def __next__(self): if self.count == self.nf: raise StopIteration path = self.files[self.count] if self.video_flag[self.count]: # Read video self.mode = 'video' ret_val, img0 = self.cap.read() while not ret_val: self.count += 1 self.cap.release() if self.count == self.nf: # last video raise StopIteration path = self.files[self.count] self.new_video(path) ret_val, img0 = self.cap.read() self.frame += 1 s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' else: # Read image self.count += 1 img0 = cv2.imread(path) # BGR assert img0 is not None, f'Image Not Found {path}' s = f'image {self.count}/{self.nf} {path}: ' # Padded resize img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0] # Convert img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return path, img, img0, self.cap, s def new_video(self, path): self.frame = 0 self.cap = cv2.VideoCapture(path) self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) def __len__(self): return self.nf # number of files class LoadWebcam: # for inference # YOLOv5 local webcam dataloader, i.e. 
`python detect.py --source 0` def __init__(self, pipe='0', img_size=640, stride=32): self.img_size = img_size self.stride = stride self.pipe = eval(pipe) if pipe.isnumeric() else pipe self.cap = cv2.VideoCapture(self.pipe) # video capture object self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size def __iter__(self): self.count = -1 return self def __next__(self): self.count += 1 if cv2.waitKey(1) == ord('q'): # q to quit self.cap.release() cv2.destroyAllWindows() raise StopIteration # Read frame ret_val, img0 = self.cap.read() img0 = cv2.flip(img0, 1) # flip left-right # Print assert ret_val, f'Camera Error {self.pipe}' img_path = 'webcam.jpg' s = f'webcam {self.count}: ' # Padded resize img = letterbox(img0, self.img_size, stride=self.stride)[0] # Convert img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return img_path, img, img0, None, s def __len__(self): return 0 class LoadStreams: # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): self.mode = 'stream' self.img_size = img_size self.stride = stride if os.path.isfile(sources): with open(sources) as f: sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] else: sources = [sources] n = len(sources) self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n self.sources = [clean_str(x) for x in sources] # clean source names for later self.auto = auto for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream st = f'{i + 1}/{n}: {s}... ' if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam if s == 0: assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.' cap = cv2.VideoCapture(s) assert cap.isOpened(), f'{st}Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback _, self.imgs[i] = cap.read() # guarantee first frame self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") self.threads[i].start() LOGGER.info('') # newline # check for common shapes s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs]) self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal if not self.rect: LOGGER.warning('WARNING: Stream shapes differ. 
For optimal performance supply similarly-shaped streams.') def update(self, i, cap, stream): # Read stream `i` frames in daemon thread n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame while cap.isOpened() and n < f: n += 1 # _, self.imgs[index] = cap.read() cap.grab() if n % read == 0: success, im = cap.retrieve() if success: self.imgs[i] = im else: LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.') self.imgs[i] = np.zeros_like(self.imgs[i]) cap.open(stream) # re-open stream if signal was lost time.sleep(0.0) # wait time def __iter__(self): self.count = -1 return self def __next__(self): self.count += 1 if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit cv2.destroyAllWindows() raise StopIteration # Letterbox img0 = self.imgs.copy() img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0] # Stack img = np.stack(img, 0) # Convert img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW img = np.ascontiguousarray(img) return self.sources, img, img0, None, '' def __len__(self): return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years def img2label_paths(img_paths): # Define label paths as a function of image paths sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] class LoadImagesAndLabels(Dataset): # YOLOv5 train_loader/val_loader, loads images and labels for training and validation cache_version = 0.6 # dataset labels *.cache version rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] def __init__( self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, cache_images=False, single_cls=False, stride=32, pad=0.0, prefix='' ): self.img_size = img_size self.augment = augment self.hyp = hyp self.image_weights = image_weights self.rect = False if image_weights else rect self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) self.mosaic_border = [-img_size // 2, -img_size // 2] self.stride = stride self.path = path self.albumentations = Albumentations() if augment else None try: f = [] # image files for p in path if isinstance(path, list) else [path]: p = Path(p) # os-agnostic if p.is_dir(): # dir f += glob.glob(str(p / '**' / '*.*'), recursive=True) # f = list(p.rglob('*.*')) # pathlib elif p.is_file(): # file with open(p) as t: t = t.read().strip().splitlines() parent = str(p.parent) + os.sep f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) else: raise FileNotFoundError(f'{prefix}{p} does not exist') self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib assert self.im_files, f'{prefix}No images found' except Exception as e: raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}') # Check cache self.label_files = img2label_paths(self.im_files) # labels cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') try: cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict assert 
cache['version'] == self.cache_version # matches current version assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash except Exception: cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops # Display cache nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists and LOCAL_RANK in {-1, 0}: d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt" tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}' # Read cache [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items labels, shapes, self.segments = zip(*cache.values()) self.labels = list(labels) self.shapes = np.array(shapes, dtype=np.float64) self.im_files = list(cache.keys()) # update self.label_files = img2label_paths(cache.keys()) # update n = len(shapes) # number of images bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index nb = bi[-1] + 1 # number of batches self.batch = bi # batch index of image self.n = n self.indices = range(n) # Update labels include_class = [] # filter labels to include only these classes (optional) include_class_array = np.array(include_class).reshape(1, -1) for i, (label, segment) in enumerate(zip(self.labels, self.segments)): if include_class: j = (label[:, 0:1] == include_class_array).any(1) self.labels[i] = label[j] if segment: self.segments[i] = segment[j] if single_cls: # single-class training, merge all classes into 0 self.labels[i][:, 0] = 0 if segment: self.segments[i][:, 0] = 0 # Rectangular Training if self.rect: # Sort by aspect ratio s = self.shapes # wh ar = s[:, 1] / s[:, 0] # aspect ratio irect = ar.argsort() self.im_files = [self.im_files[i] for i in irect] self.label_files = [self.label_files[i] for i in irect] self.labels = [self.labels[i] for i in irect] self.shapes = s[irect] # wh ar = ar[irect] # Set training image shapes shapes = [[1, 1]] * nb for i in range(nb): ari = ar[bi == i] mini, maxi = ari.min(), ari.max() if maxi < 1: shapes[i] = [maxi, 1] elif mini > 1: shapes[i] = [1, 1 / mini] self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources) self.ims = [None] * n self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] if cache_images: gb = 0 # Gigabytes of cached images self.im_hw0, self.im_hw = [None] * n, [None] * n fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT, disable=LOCAL_RANK > 0) for i, x in pbar: if cache_images == 'disk': gb += self.npy_files[i].stat().st_size else: # 'ram' self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) gb += self.ims[i].nbytes pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' pbar.close() def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." 
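# --- Added explanatory note; not part of the original file. The cache assembled below and
# saved with np.save() is a plain dict: one entry per image path mapping to
# [labels, shape, segments], plus the bookkeeping keys 'hash', 'results'
# (found, missing, empty, corrupt, total), 'msgs' and 'version' that the constructor above
# checks before trusting an existing *.cache file. ---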
with Pool(NUM_THREADS) as pool: pbar = tqdm( pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), desc=desc, total=len(self.im_files), bar_format=BAR_FORMAT ) for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f nf += nf_f ne += ne_f nc += nc_f if im_file: x[im_file] = [lb, shape, segments] if msg: msgs.append(msg) pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt" pbar.close() if msgs: LOGGER.info('\n'.join(msgs)) if nf == 0: LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}') x['hash'] = get_hash(self.label_files + self.im_files) x['results'] = nf, nm, ne, nc, len(self.im_files) x['msgs'] = msgs # warnings x['version'] = self.cache_version # cache version try: np.save(path, x) # save cache for next time path.with_suffix('.cache.npy').rename(path) # remove .npy suffix LOGGER.info(f'{prefix}New cache created: {path}') except Exception as e: LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable return x def __len__(self): return len(self.im_files) # def __iter__(self): # self.count = -1 # print('ran dataset iter') # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) # return self def __getitem__(self, index): index = self.indices[index] # linear, shuffled, or image_weights hyp = self.hyp mosaic = self.mosaic and random.random() < hyp['mosaic'] if mosaic: # Load mosaic img, labels = self.load_mosaic(index) shapes = None # MixUp augmentation if random.random() < hyp['mixup']: img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1))) else: # Load image img, (h0, w0), (h, w) = self.load_image(index) # Letterbox shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling labels = self.labels[index].copy() if labels.size: # normalized xywh to pixel xyxy format labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) if self.augment: img, labels = random_perspective( img, labels, degrees=hyp['degrees'], translate=hyp['translate'], scale=hyp['scale'], shear=hyp['shear'], perspective=hyp['perspective'] ) nl = len(labels) # number of labels if nl: labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) if self.augment: # Albumentations img, labels = self.albumentations(img, labels) nl = len(labels) # update after albumentations # HSV color-space augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) # Flip up-down if random.random() < hyp['flipud']: img = np.flipud(img) if nl: labels[:, 2] = 1 - labels[:, 2] # Flip left-right if random.random() < hyp['fliplr']: img = np.fliplr(img) if nl: labels[:, 1] = 1 - labels[:, 1] # Cutouts # labels = cutout(img, labels, p=0.5) # nl = len(labels) # update after cutout labels_out = torch.zeros((nl, 6)) if nl: labels_out[:, 1:] = torch.from_numpy(labels) # Convert img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return torch.from_numpy(img), labels_out, self.im_files[index], shapes def load_image(self, i): # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw) im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i], if im is None: # not cached in RAM if fn.exists(): # load npy im = 
np.load(fn) else: # read image im = cv2.imread(f) # BGR assert im is not None, f'Image Not Found {f}' h0, w0 = im.shape[:2] # orig hw r = self.img_size / max(h0, w0) # ratio if r != 1: # if sizes are not equal interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp) return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized else: return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized def cache_images_to_disk(self, i): # Saves an image as an *.npy file for faster loading f = self.npy_files[i] if not f.exists(): np.save(f.as_posix(), cv2.imread(self.im_files[i])) def load_mosaic(self, index): # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic labels4, segments4 = [], [] s = self.img_size yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices random.shuffle(indices) for i, index in enumerate(indices): # Load image img, _, (h, w) = self.load_image(index) # place img in img4 if i == 0: # top left img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) elif i == 1: # top right x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h elif i == 2: # bottom left x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) elif i == 3: # bottom right x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] padw = x1a - x1b padh = y1a - y1b # Labels labels, segments = self.labels[index].copy(), self.segments[index].copy() if labels.size: labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format segments = [xyn2xy(x, w, h, padw, padh) for x in segments] labels4.append(labels) segments4.extend(segments) # Concat/clip labels labels4 = np.concatenate(labels4, 0) for x in (labels4[:, 1:], *segments4): np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() # img4, labels4 = replicate(img4, labels4) # replicate # Augment img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) img4, labels4 = random_perspective( img4, labels4, segments4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], shear=self.hyp['shear'], perspective=self.hyp['perspective'], border=self.mosaic_border ) # border to remove return img4, labels4 def load_mosaic9(self, index): # YOLOv5 9-mosaic loader. 
Loads 1 image + 8 random images into a 9-image mosaic labels9, segments9 = [], [] s = self.img_size indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices random.shuffle(indices) hp, wp = -1, -1 # height, width previous for i, index in enumerate(indices): # Load image img, _, (h, w) = self.load_image(index) # place img in img9 if i == 0: # center img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles h0, w0 = h, w c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates elif i == 1: # top c = s, s - h, s + w, s elif i == 2: # top right c = s + wp, s - h, s + wp + w, s elif i == 3: # right c = s + w0, s, s + w0 + w, s + h elif i == 4: # bottom right c = s + w0, s + hp, s + w0 + w, s + hp + h elif i == 5: # bottom c = s + w0 - w, s + h0, s + w0, s + h0 + h elif i == 6: # bottom left c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h elif i == 7: # left c = s - w, s + h0 - h, s, s + h0 elif i == 8: # top left c = s - w, s + h0 - hp - h, s, s + h0 - hp padx, pady = c[:2] x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords # Labels labels, segments = self.labels[index].copy(), self.segments[index].copy() if labels.size: labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format segments = [xyn2xy(x, w, h, padx, pady) for x in segments] labels9.append(labels) segments9.extend(segments) # Image img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] hp, wp = h, w # height, width previous # Offset yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] # Concat/clip labels labels9 = np.concatenate(labels9, 0) labels9[:, [1, 3]] -= xc labels9[:, [2, 4]] -= yc c = np.array([xc, yc]) # centers segments9 = [x - c for x in segments9] for x in (labels9[:, 1:], *segments9): np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() # img9, labels9 = replicate(img9, labels9) # replicate # Augment img9, labels9 = random_perspective( img9, labels9, segments9, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], shear=self.hyp['shear'], perspective=self.hyp['perspective'], border=self.mosaic_border ) # border to remove return img9, labels9 @staticmethod def collate_fn(batch): im, label, path, shapes = zip(*batch) # transposed for i, lb in enumerate(label): lb[:, 0] = i # add target image index for build_targets() return torch.stack(im, 0), torch.cat(label, 0), path, shapes @staticmethod def collate_fn4(batch): img, label, path, shapes = zip(*batch) # transposed n = len(shapes) // 4 im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]]) wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]]) s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW i *= 4 if random.random() < 0.5: im = F.interpolate( img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', align_corners=False )[0].type(img[i].type()) lb = label[i] else: im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s im4.append(im) label4.append(lb) for i, lb in enumerate(label4): lb[:, 0] = i # add target image index for build_targets() return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4 # Ancillary functions 
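# --- Added illustrative sketch; not part of the original file. With assumed dummy tensors it
# shows how collate_fn above batches samples: images are stacked along a new batch axis and the
# per-image label tensors are concatenated after each image's batch index is written into
# column 0, which build_targets() later uses to match labels back to their image. ---
import torch

ims_example = [torch.zeros(3, 640, 640), torch.zeros(3, 640, 640)]    # two dummy CHW images
labels_example = [torch.tensor([[0., 0., 0.5, 0.5, 0.2, 0.2]]),       # rows: [img_idx, cls, x, y, w, h]
                  torch.tensor([[0., 1., 0.4, 0.4, 0.1, 0.1],
                                [0., 2., 0.6, 0.6, 0.3, 0.3]])]
for i_example, lb_example in enumerate(labels_example):
    lb_example[:, 0] = i_example                                      # stamp the image index into column 0
batch_im = torch.stack(ims_example, 0)
batch_lb = torch.cat(labels_example, 0)
print(batch_im.shape, batch_lb.shape, batch_lb[:, 0])                 # (2, 3, 640, 640) (3, 6) tensor([0., 1., 1.])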
-------------------------------------------------------------------------------------------------- def create_folder(path='./new'): # Create folder if os.path.exists(path): shutil.rmtree(path) # delete output folder os.makedirs(path) # make new output folder def flatten_recursive(path=DATASETS_DIR / 'coco128'): # Flatten a recursive directory by bringing all files to top level new_path = Path(str(path) + '_flat') create_folder(new_path) for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): shutil.copyfile(file, new_path / Path(file).name) def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes() # Convert detection dataset into classification dataset, with one directory per class path = Path(path) # images dir shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing files = list(path.rglob('*.*')) n = len(files) # number of files for im_file in tqdm(files, total=n): if im_file.suffix[1:] in IMG_FORMATS: # image im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB h, w = im.shape[:2] # labels lb_file = Path(img2label_paths([str(im_file)])[0]) if Path(lb_file).exists(): with open(lb_file) as f: lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels for j, x in enumerate(lb): c = int(x[0]) # class f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename if not f.parent.is_dir(): f.parent.mkdir(parents=True) b = x[1:] * [w, h, w, h] # box # b[2:] = b[2:].max() # rectangle to square b[2:] = b[2:] * 1.2 + 3 # pad b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image b[[1, 3]] = np.clip(b[[1, 3]], 0, h) assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): """ Autosplit a dataset into train/eval/test splits and save path/autosplit_*.txt files Usage: from utils.dataloaders import *; autosplit() Arguments path: Path to images directory weights: Train, eval, test weights (list, tuple) annotated_only: Only use images with an annotated txt file """ path = Path(path) # images dir files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only n = len(files) # number of files random.seed(0) # for reproducibility indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files [(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) for i, img in tqdm(zip(indices, files), total=n): if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label with open(path.parent / txt[i], 'a') as f: f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file def verify_image_label(args): # Verify one image-label pair im_file, lb_file, prefix = args nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments try: # verify images im = Image.open(im_file) im.verify() # PIL verify shape = exif_size(im) # image size assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' if im.format.lower() in ('jpg', 'jpeg'): 
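# --- Added explanatory note; not part of the original file. A well-formed JPEG ends with the
# two-byte End-Of-Image marker 0xFFD9; the check below seeks to the last two bytes of the file
# and, if the marker is missing, re-saves the image through PIL so training does not later
# fail on a truncated JPEG. ---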
with open(im_file, 'rb') as f: f.seek(-2, 2) if f.read() != b'\xff\xd9': # corrupt JPEG ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved' # verify labels if os.path.isfile(lb_file): nf = 1 # label found with open(lb_file) as f: lb = [x.split() for x in f.read().strip().splitlines() if len(x)] if any(len(x) > 6 for x in lb): # is segment classes = np.array([x[0] for x in lb], dtype=np.float32) segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) lb = np.array(lb, dtype=np.float32) nl = len(lb) if nl: assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' _, i = np.unique(lb, axis=0, return_index=True) if len(i) < nl: # duplicate row check lb = lb[i] # remove duplicates if segments: segments = segments[i] msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed' else: ne = 1 # label empty lb = np.zeros((0, 5), dtype=np.float32) else: nm = 1 # label missing lb = np.zeros((0, 5), dtype=np.float32) return im_file, lb, shape, segments, nm, nf, ne, nc, msg except Exception as e: nc = 1 msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}' return [None, None, None, None, nm, nf, ne, nc, msg] def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False): """ Return dataset statistics dictionary with images and instances counts per split per class To run in parent directory: export PYTHONPATH="$PWD/yolov5" Usage1: from utils.dataloaders import *; dataset_stats('coco128.yaml', autodownload=True) Usage2: from utils.dataloaders import *; dataset_stats('path/to/coco128_with_yaml.zip') Arguments path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally verbose: Print stats dictionary """ def _round_labels(labels): # Update labels to integer class and 6 decimal place floats return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] def _find_yaml(dir): # Return data.yaml file files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive assert files, f'No *.yaml file found in {dir}' if len(files) > 1: files = [f for f in files if f.stem == dir.stem] # prefer *.yaml files that match dir name assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed' assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}' return files[0] def _unzip(path): # Unzip data.zip if str(path).endswith('.zip'): # path is data.zip assert Path(path).is_file(), f'Error unzipping {path}, file not found' ZipFile(path).extractall(path=path.parent) # unzip dir = path.with_suffix('') # dataset directory == zip name assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. 
path/to/abc.zip MUST unzip to path/to/abc/' return True, str(dir), _find_yaml(dir) # zipped, data_dir, yaml_path else: # path is data.yaml return False, None, path def _hub_ops(f, max_dim=1920): # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing f_new = im_dir / Path(f).name # dataset-hub image filename try: # use PIL im = Image.open(f) r = max_dim / max(im.height, im.width) # ratio if r < 1.0: # image too large im = im.resize((int(im.width * r), int(im.height * r))) im.save(f_new, 'JPEG', quality=75, optimize=True) # save except Exception as e: # use OpenCV print(f'WARNING: HUB ops PIL failure {f}: {e}') im = cv2.imread(f) im_height, im_width = im.shape[:2] r = max_dim / max(im_height, im_width) # ratio if r < 1.0: # image too large im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) cv2.imwrite(str(f_new), im) zipped, data_dir, yaml_path = _unzip(Path(path)) try: with open(check_yaml(yaml_path), errors='ignore') as f: data = yaml.safe_load(f) # data dict if zipped: data['path'] = data_dir # TODO: should this be dir.resolve()?` except Exception: raise Exception("error/HUB/dataset_stats/yaml_load") check_dataset(data, autodownload) # download dataset if missing hub_dir = Path(data['path'] + ('-hub' if hub else '')) stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary for split in 'train', 'eval', 'test': if data.get(split) is None: stats[split] = None # i.e. no test set continue x = [] dataset = LoadImagesAndLabels(data[split]) # load dataset for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'): x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc'])) x = np.array(x) # shape(128x80) stats[split] = { 'instance_stats': { 'total': int(x.sum()), 'per_class': x.sum(0).tolist()}, 'image_stats': { 'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()), 'per_class': (x > 0).sum(0).tolist()}, 'labels': [{ str(Path(k).name): _round_labels(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]} if hub: im_dir = hub_dir / 'images' im_dir.mkdir(parents=True, exist_ok=True) for _ in tqdm(ThreadPool(NUM_THREADS).imap(_hub_ops, dataset.im_files), total=dataset.n, desc='HUB Ops'): pass # Profile stats_path = hub_dir / 'stats.json' if profile: for _ in range(1): file = stats_path.with_suffix('.npy') t1 = time.time() np.save(file, stats) t2 = time.time() x = np.load(file, allow_pickle=True) print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write') file = stats_path.with_suffix('.json') t1 = time.time() with open(file, 'w') as f: json.dump(stats, f) # save stats *.json t2 = time.time() with open(file) as f: x = json.load(f) # load hyps dict print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write') # Save, print and return if hub: print(f'Saving {stats_path.resolve()}...') with open(stats_path, 'w') as f: json.dump(stats, f) # save stats.json if verbose: print(json.dumps(stats, indent=2, sort_keys=False)) return stats ================================================ FILE: module/detect/utils/docker/Dockerfile ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch FROM nvcr.io/nvidia/pytorch:22.06-py3 RUN rm -rf 
/opt/pytorch # remove 1.2GB dir # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Install linux packages RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx # Install pip packages COPY requirements.txt . RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext # torch torchvision RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 # Create working directory RUN mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents COPY . /usr/src/app RUN git clone https://github.com/ultralytics/yolov5 /usr/src/yolov5 # Set environment variables ENV OMP_NUM_THREADS=8 # Usage Examples ------------------------------------------------------------------------------------------------------- # Build and Push # t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t # Pull and Run # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t # Pull and Run with local directory access # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t # Kill all # sudo docker kill $(sudo docker ps -q) # Kill all image-based # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) # Bash into running container # sudo docker exec -it 5a9b5863d93d bash # Bash into stopped container # id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash # Clean up # docker system prune -a --volumes # Update Ubuntu drivers # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ # DDP test # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 trainfd.py --epochs 3 # GCP VM from Image # docker.io/ultralytics/yolov5:latest ================================================ FILE: module/detect/utils/docker/Dockerfile-arm64 ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu FROM arm64v8/ubuntu:20.04 # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Install linux packages RUN apt update RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc \ libgl1-mesa-glx libglib2.0-0 libpython3.8-dev # RUN alias python=python3 # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel RUN pip install --no-cache -r requirements.txt gsutil notebook \ tensorflow-aarch64 # tensorflowjs \ # onnx onnx-simplifier onnxruntime \ # coremltools openvino-dev \ # Create working directory RUN mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents COPY . 
/usr/src/app RUN git clone https://github.com/ultralytics/yolov5 /usr/src/yolov5 # Usage Examples ------------------------------------------------------------------------------------------------------- # Build and Push # t=ultralytics/yolov5:latest-M1 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t # Pull and Run # t=ultralytics/yolov5:latest-M1 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t ================================================ FILE: module/detect/utils/docker/Dockerfile-cpu ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu FROM ubuntu:20.04 # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Install linux packages RUN apt update RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3.8-dev # RUN alias python=python3 # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu tensorflowjs \ --extra-index-url https://download.pytorch.org/whl/cpu # Create working directory RUN mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents COPY . /usr/src/app RUN git clone https://github.com/ultralytics/yolov5 /usr/src/yolov5 # Usage Examples ------------------------------------------------------------------------------------------------------- # Build and Push # t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . 
&& sudo docker push $t # Pull and Run # t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t ================================================ FILE: module/detect/utils/downloads.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Download utils """ import logging import os import platform import subprocess import time import urllib from pathlib import Path from zipfile import ZipFile import requests import torch def is_url(url): # Check if online file exists try: r = urllib.request.urlopen(url) # response return r.getcode() == 200 except urllib.request.HTTPError: return False def gsutil_getsize(url=''): # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') return eval(s.split(' ')[0]) if len(s) else 0 # bytes def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes from utils.general import LOGGER file = Path(file) assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" try: # url1 LOGGER.info(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO) assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check except Exception as e: # url2 file.unlink(missing_ok=True) # remove partial downloads LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail finally: if not file.exists() or file.stat().st_size < min_bytes: # check file.unlink(missing_ok=True) # remove partial downloads LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") LOGGER.info('') def attempt_download(file, repo='ultralytics/yolov5', release='v6.1'): # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.1', etc. from utils.general import LOGGER def github_assets(repository, version='latest'): # Return GitHub repo tag (i.e. 'v6.1') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) if version != 'latest': version = f'tags/{version}' # i.e. tags/v6.1 response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets file = Path(str(file).strip().replace("'", '')) if not file.exists(): # URL specified name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. if str(file).startswith(('http:/', 'https:/')): # download url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... 
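            # Reuse a previously downloaded copy if one already exists locally; otherwise fetch it with
            # safe_download() (min_bytes=1E5 rejects empty or truncated downloads).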
if Path(file).is_file(): LOGGER.info(f'Found {url} locally at {file}') # file already exists else: safe_download(file=file, url=url, min_bytes=1E5) return file # GitHub assets assets = [ 'yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov5n6.pt', 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] try: tag, assets = github_assets(repo, release) except Exception: try: tag, assets = github_assets(repo) # latest release except Exception: try: tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] except Exception: tag = release file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) if name in assets: url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl' # backup gdrive mirror safe_download( file, url=f'https://github.com/{repo}/releases/download/{tag}/{name}', url2=f'https://storage.googleapis.com/{repo}/{tag}/{name}', # backup url (optional) min_bytes=1E5, error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') return str(file) def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download() t = time.time() file = Path(file) cookie = Path('cookie') # gdrive cookie print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='') file.unlink(missing_ok=True) # remove existing file cookie.unlink(missing_ok=True) # remove existing cookie # Attempt file download out = "NUL" if platform.system() == "Windows" else "/dev/null" os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') if os.path.exists('cookie'): # large file s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' else: # small file s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' r = os.system(s) # execute, capture return cookie.unlink(missing_ok=True) # remove existing cookie # Error check if r != 0: file.unlink(missing_ok=True) # remove partial print('Download error ') # raise Exception('Download error') return r # Unzip if archive if file.suffix == '.zip': print('unzipping... 
', end='')
        ZipFile(file).extractall(path=file.parent)  # unzip
        file.unlink()  # remove zip

    print(f'Done ({time.time() - t:.1f}s)')
    return r


def get_token(cookie="./cookie"):
    with open(cookie) as f:
        for line in f:
            if "download" in line:
                return line.split()[-1]
    return ""


# Google utils: https://cloud.google.com/storage/docs/reference/libraries ----------------------------------------------
#
#
# def upload_blob(bucket_name, source_file_name, destination_blob_name):
#     # Uploads a file to a bucket
#     # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
#
#     storage_client = storage.Client()
#     bucket = storage_client.get_bucket(bucket_name)
#     blob = bucket.blob(destination_blob_name)
#
#     blob.upload_from_filename(source_file_name)
#
#     print('File {} uploaded to {}.'.format(
#         source_file_name,
#         destination_blob_name))
#
#
# def download_blob(bucket_name, source_blob_name, destination_file_name):
#     # Downloads a blob from a bucket
#     storage_client = storage.Client()
#     bucket = storage_client.get_bucket(bucket_name)
#     blob = bucket.blob(source_blob_name)
#
#     blob.download_to_filename(destination_file_name)
#
#     print('Blob {} downloaded to {}.'.format(
#         source_blob_name,
#         destination_file_name))

================================================
FILE: module/detect/utils/flask_rest_api/README.md
================================================
# Flask REST API

[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).

## Requirements

[Flask](https://palletsprojects.com/p/flask/) is required. Install with:

```shell
$ pip install Flask
```

## Run

After Flask installation run:

```shell
$ python3 restapi.py --port 5000
```

Then use [curl](https://curl.se/) to perform a request:

```shell
$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'
```

The model inference results are returned as a JSON response:

```json
[
  {
    "class": 0,
    "confidence": 0.8900438547,
    "height": 0.9318675399,
    "name": "person",
    "width": 0.3264600933,
    "xcenter": 0.7438579798,
    "ycenter": 0.5207948685
  },
  {
    "class": 0,
    "confidence": 0.8440024257,
    "height": 0.7155083418,
    "name": "person",
    "width": 0.6546785235,
    "xcenter": 0.427829951,
    "ycenter": 0.6334488392
  },
  {
    "class": 27,
    "confidence": 0.3771208823,
    "height": 0.3902671337,
    "name": "tie",
    "width": 0.0696444362,
    "xcenter": 0.3675483763,
    "ycenter": 0.7991207838
  },
  {
    "class": 27,
    "confidence": 0.3527112305,
    "height": 0.1540903747,
    "name": "tie",
    "width": 0.0336618312,
    "xcenter": 0.7814827561,
    "ycenter": 0.5065554976
  }
]
```

An example Python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given in `example_request.py`.

================================================
FILE: module/detect/utils/flask_rest_api/example_request.py
================================================
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Perform test request
"""

import pprint

import requests

DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
IMAGE = "zidane.jpg"

# Read image
with open(IMAGE, "rb") as f:
    image_data = f.read()

response = requests.post(DETECTION_URL, files={"image": image_data}).json()

pprint.pprint(response)

================================================
FILE: module/detect/utils/flask_rest_api/restapi.py
================================================
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Run a Flask REST API exposing a YOLOv5s model
"""

import argparse
import io

import torch
from flask import Flask, request
from PIL import Image

app = Flask(__name__)

DETECTION_URL = "/v1/object-detection/yolov5s"


@app.route(DETECTION_URL, methods=["POST"])
def predict():
    if request.method != "POST":
        return

    if request.files.get("image"):
        # Method 1
        # with request.files["image"] as f:
        #     im = Image.open(io.BytesIO(f.read()))

        # Method 2
        im_file = request.files["image"]
        im_bytes = im_file.read()
        im = Image.open(io.BytesIO(im_bytes))

        results = model(im, size=640)  # reduce size=320 for faster inference
        return results.pandas().xyxy[0].to_json(orient="records")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model")
    parser.add_argument("--port", default=5000, type=int, help="port number")
    opt = parser.parse_args()

    # Fix known issue urllib.error.HTTPError 403: rate limit exceeded https://github.com/ultralytics/yolov5/pull/7210
    torch.hub._validate_not_a_forked_repo = lambda a, b, c: True

    model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True)  # force_reload to recache
    app.run(host="0.0.0.0", port=opt.port)  # debug=True causes Restarting with stat

================================================
FILE: module/detect/utils/general.py
================================================
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
General utils
"""

import contextlib
import glob
import inspect
import logging
import os
import platform
import random
import re
import shutil
import signal
import threading
import time
import urllib
from datetime import datetime
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path from subprocess import check_output from typing import Optional from zipfile import ZipFile import cv2 import math import numpy as np import pandas as pd import pkg_resources as pkg import torch import torchvision import yaml from utils.downloads import gsutil_getsize from utils.metrics import box_iou, fitness FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory RANK = int(os.getenv('RANK', -1)) # Settings DATASETS_DIR = ROOT.parent / 'datasets' # YOLOv5 datasets directory NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 pd.options.display.max_columns = 10 cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) def is_kaggle(): # Is environment a Kaggle Notebook? try: assert os.environ.get('PWD') == '/kaggle/working' assert os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' return True except AssertionError: return False def is_writeable(dir, test=False): # Return True if directory has write permissions, test opening a file with write permissions if test=True if not test: return os.access(dir, os.R_OK) # possible issues on Windows file = Path(dir) / 'tmp.txt' try: with open(file, 'w'): # open file with write permissions pass file.unlink() # remove file return True except OSError: return False def set_logging(name=None, verbose=VERBOSE): # Sets level and returns logger if is_kaggle(): for h in logging.root.handlers: logging.root.removeHandler(h) # remove all handlers associated with the root logger object rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR log = logging.getLogger(name) log.setLevel(level) handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(message)s")) handler.setLevel(level) log.addHandler(handler) # set_logging() # run before defining LOGGER LOGGER = logging.getLogger() # define globally (used in train.py, eval.py, detect.py, etc.) def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. 
env = os.getenv(env_var) if env: path = Path(env) # use environment variable else: cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable path.mkdir(exist_ok=True) # make if required return path CONFIG_DIR = user_config_dir() # Ultralytics settings dir class Profile(contextlib.ContextDecorator): # Usage: @Profile() decorator or 'with Profile():' context manager def __enter__(self): self.start = time.time() def __exit__(self, type, value, traceback): print(f'Profile results: {time.time() - self.start:.5f}s') class Timeout(contextlib.ContextDecorator): # Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): self.seconds = int(seconds) self.timeout_message = timeout_msg self.suppress = bool(suppress_timeout_errors) def _timeout_handler(self, signum, frame): raise TimeoutError(self.timeout_message) def __enter__(self): if platform.system() != 'Windows': # not supported on Windows signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM signal.alarm(self.seconds) # start countdown for SIGALRM to be raised def __exit__(self, exc_type, exc_val, exc_tb): if platform.system() != 'Windows': signal.alarm(0) # Cancel SIGALRM if it's scheduled if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError return True class WorkingDirectory(contextlib.ContextDecorator): # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager def __init__(self, new_dir): self.dir = new_dir # new dir self.cwd = Path.cwd().resolve() # current dir def __enter__(self): os.chdir(self.dir) def __exit__(self, exc_type, exc_val, exc_tb): os.chdir(self.cwd) def try_except(func): # try-except function. Usage: @try_except decorator def handler(*args, **kwargs): try: func(*args, **kwargs) except Exception as e: print(e) return handler def threaded(func): # Multi-threads a target function and returns thread. 
Usage: @threaded decorator def wrapper(*args, **kwargs): thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) thread.start() return thread return wrapper def methods(instance): # Get class/instance methods return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] def print_args(args: Optional[dict] = None, show_file=True, show_fcn=False): # Print function arguments (optional args dict) x = inspect.currentframe().f_back # previous frame file, _, fcn, _, _ = inspect.getframeinfo(x) if args is None: # get args automatically args, _, _, frm = inspect.getargvalues(x) args = {k: v for k, v in frm.items() if k in args} s = (f'{Path(file).stem}: ' if show_file else '') + (f'{fcn}: ' if show_fcn else '') LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) def init_seeds(seed=0, deterministic=False): # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible import torch.backends.cudnn as cudnn if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 torch.use_deterministic_algorithms(True) os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' os.environ['PYTHONHASHSEED'] = str(seed) random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe def intersect_dicts(da, db, exclude=()): # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} def get_latest_run(search_dir='.'): # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) return max(last_list, key=os.path.getctime) if last_list else '' def is_docker(): # Is environment a Docker container? return Path('/workspace').exists() # or Path('/.dockerenv').exists() def is_colab(): # Is environment a Google Colab instance? try: import google.colab return True except ImportError: return False def is_pip(): # Is file in a pip package? return 'site-packages' in Path(__file__).resolve().parts def is_ascii(s=''): # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) s = str(s) # convert list, tuple, None, etc. to str return len(s.encode().decode('ascii', 'ignore')) == len(s) def is_chinese(s='人工智能'): # Is string composed of any Chinese characters? return bool(re.search('[\u4e00-\u9fff]', str(s))) def emojis(str=''): # Return platform-dependent emoji-safe version of string return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str def file_age(path=__file__): # Return days since last file update dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta return dt.days # + dt.seconds / 86400 # fractional days def file_date(path=__file__): # Return human-readable file modification date, i.e. 
'2021-3-26' t = datetime.fromtimestamp(Path(path).stat().st_mtime) return f'{t.year}-{t.month}-{t.day}' def file_size(path): # Return file/dir size (MB) mb = 1 << 20 # bytes to MiB (1024 ** 2) path = Path(path) if path.is_file(): return path.stat().st_size / mb elif path.is_dir(): return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb else: return 0.0 def check_online(): # Check internet connectivity import socket try: socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility return True except OSError: return False def git_describe(path=ROOT): # path must be a directory # Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe try: assert (Path(path) / '.git').is_dir() return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] except Exception: return '' @try_except @WorkingDirectory(ROOT) def check_git_status(): # Recommend 'git pull' if code is out of date msg = ', for updates see https://github.com/ultralytics/yolov5' s = colorstr('github: ') # string assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg assert not is_docker(), s + 'skipping check (Docker image)' + msg assert check_online(), s + 'skipping check (offline)' + msg cmd = 'git fetch && git config --get remote.origin.url' url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind if n > 0: s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update." else: s += f'up to date with {url} ✅' LOGGER.info(emojis(s)) # emoji-safe def check_python(minimum='3.7.0'): # Check current python version vs. required python version check_version(platform.python_version(), minimum, name='Python ', hard=True) def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): # Check version vs. required version current, minimum = (pkg.parse_version(x) for x in (current, minimum)) result = (current == minimum) if pinned else (current >= minimum) # bool s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string if hard: assert result, s # assert min requirements met if verbose and not result: LOGGER.warning(s) return result @try_except def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=()): # Check installed dependencies meet requirements (pass *.txt file or list of packages) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version if isinstance(requirements, (str, Path)): # requirements.txt file file = Path(requirements) assert file.exists(), f"{prefix} {file.resolve()} not found, check failed." 
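        # Parse requirements.txt into 'name<specifier>' strings via pkg_resources, skipping any
        # packages listed in `exclude`.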
with file.open() as f: requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] else: # list or tuple of packages requirements = [x for x in requirements if x not in exclude] n = 0 # number of packages updates for i, r in enumerate(requirements): try: pkg.require(r) except Exception: # DistributionNotFound or VersionConflict if requirements not met s = f"{prefix} {r} not found and is required by YOLOv5" if install and AUTOINSTALL: # check environment variable LOGGER.info(f"{s}, attempting auto-update...") try: assert check_online(), f"'pip install {r}' skipped (offline)" LOGGER.info(check_output(f'pip install "{r}" {cmds[i] if cmds else ""}', shell=True).decode()) n += 1 except Exception as e: LOGGER.warning(f'{prefix} {e}') else: LOGGER.info(f'{s}. Please install and rerun your command.') if n: # if packages updated source = file.resolve() if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" LOGGER.info(emojis(s)) def check_img_size(imgsz, s=32, floor=0): # Verify image size is a multiple of stride s in each dimension if isinstance(imgsz, int): # integer i.e. img_size=640 new_size = max(make_divisible(imgsz, int(s)), floor) else: # list i.e. img_size=[640, 480] imgsz = list(imgsz) # convert to list if tuple new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] if new_size != imgsz: LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') return new_size def check_imshow(): # Check if environment supports image displays try: assert not is_docker(), 'cv2.imshow() is disabled in Docker environments' assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments' cv2.imshow('test', np.zeros((1, 1, 3))) cv2.waitKey(1) cv2.destroyAllWindows() cv2.waitKey(1) return True except Exception as e: LOGGER.warning(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') return False def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): # Check file(s) for acceptable suffix if file and suffix: if isinstance(suffix, str): suffix = [suffix] for f in file if isinstance(file, (list, tuple)) else [file]: s = Path(f).suffix.lower() # file suffix if len(s): assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" def check_yaml(file, suffix=('.yaml', '.yml')): # Search/download YAML file (if necessary) and return path, checking suffix return check_file(file, suffix) def check_file(file, suffix=''): # Search/download file (if necessary) and return path check_suffix(file, suffix) # optional file = str(file) # convert to str() if Path(file).is_file() or not file: # exists return file elif file.startswith(('http:/', 'https:/')): # download url = file # warning: Pathlib turns :// -> :/ file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth if Path(file).is_file(): LOGGER.info(f'Found {url} locally at {file}') # file already exists else: LOGGER.info(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, file) assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check return file else: # search files = [] for d in 'data', 'models', 'utils': # search directories files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file assert len(files), f'File not found: {file}' 
# assert file was found assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique return files[0] # return file def check_font(font=FONT, progress=False): # Download font to CONFIG_DIR if necessary font = Path(font) file = CONFIG_DIR / font.name if not font.exists() and not file.exists(): url = "https://ultralytics.com/assets/" + font.name LOGGER.info(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, str(file), progress=progress) def check_dataset(data, autodownload=True): # Download, check and/or unzip dataset if not found locally # Download (optional) extract_dir = '' if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip download(data, dir=DATASETS_DIR, unzip=True, delete=False, curl=False, threads=1) data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) extract_dir, autodownload = data.parent, False # Read yaml (optional) if isinstance(data, (str, Path)): with open(data, errors='ignore') as f: data = yaml.safe_load(f) # dictionary # Checks for k in 'train', 'eval', 'nc': assert k in data, emojis(f"data.yaml '{k}:' field missing ❌") if 'names' not in data: LOGGER.warning(emojis("data.yaml 'names:' field missing ⚠️, assigning default names 'class0', 'class1', etc.")) data['names'] = [f'class{i}' for i in range(data['nc'])] # default names # Resolve paths path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' if not path.is_absolute(): path = (ROOT / path).resolve() for k in 'train', 'eval', 'test': if data.get(k): # prepend path data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] # Parse yaml train, val, test, s = (data.get(x) for x in ('train', 'eval', 'test', 'download')) if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # eval path if not all(x.exists() for x in val): LOGGER.info(emojis('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()])) if not s or not autodownload: raise Exception(emojis('Dataset not found ❌')) t = time.time() root = path.parent if 'path' in data else '..' # unzip directory i.e. '../' if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename LOGGER.info(f'Downloading {s} to {f}...') torch.hub.download_url_to_file(s, f) Path(root).mkdir(parents=True, exist_ok=True) # create root ZipFile(f).extractall(path=root) # unzip Path(f).unlink() # remove zip r = None # success elif s.startswith('bash '): # bash script LOGGER.info(f'Running {s} ...') r = os.system(s) else: # python script r = exec(s, {'yaml': data}) # return None dt = f'({round(time.time() - t, 1)}s)' s = f"success ✅ {dt}, saved to {colorstr('bold', root)}" if r in (0, None) else f"failure {dt} ❌" LOGGER.info(emojis(f"Dataset download {s}")) check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts return data # dictionary def check_amp(model): # Check PyTorch Automatic Mixed Precision (AMP) functionality. 
Return True on correct operation from models.common import AutoShape, DetectMultiBackend def amp_allclose(model, im): # All close FP32 vs AMP results m = AutoShape(model, verbose=False) # model a = m(im).xywhn[0] # FP32 inference m.amp = True b = m(im).xywhn[0] # AMP inference return a.shape == b.shape and torch.allclose(a, b, atol=0.1) # close to 10% absolute tolerance prefix = colorstr('AMP: ') device = next(model.parameters()).device # get model device if device.type == 'cpu': return False # AMP disabled on CPU f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3)) try: assert amp_allclose(model, im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im) LOGGER.info(emojis(f'{prefix}checks passed ✅')) return True except Exception: help_url = 'https://github.com/ultralytics/yolov5/issues/7908' LOGGER.warning(emojis(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. See {help_url}')) return False def url2file(url): # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ return Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3): # Multi-threaded file download and unzip function, used in data.yaml for autodownload def download_one(url, dir): # Download 1 file success = True f = dir / Path(url).name # filename if Path(url).is_file(): # exists in current path Path(url).rename(f) # move to dir elif not f.exists(): LOGGER.info(f'Downloading {url} to {f}...') for i in range(retry + 1): if curl: s = 'sS' if threads > 1 else '' # silent r = os.system(f'curl -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue success = r == 0 else: torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download success = f.is_file() if success: break elif i < retry: LOGGER.warning(f'Download failure, retrying {i + 1}/{retry} {url}...') else: LOGGER.warning(f'Failed to download {url}...') if unzip and success and f.suffix in ('.zip', '.gz'): LOGGER.info(f'Unzipping {f}...') if f.suffix == '.zip': ZipFile(f).extractall(path=dir) # unzip elif f.suffix == '.gz': os.system(f'tar xfz {f} --directory {f.parent}') # unzip if delete: f.unlink() # remove zip dir = Path(dir) dir.mkdir(parents=True, exist_ok=True) # make directory if threads > 1: pool = ThreadPool(threads) pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded pool.close() pool.join() else: for u in [url] if isinstance(url, (str, Path)) else url: download_one(u, dir) def make_divisible(x, divisor): # Returns nearest x divisible by divisor if isinstance(divisor, torch.Tensor): divisor = int(divisor.max()) # to int return math.ceil(x / divisor) * divisor def clean_str(s): # Cleans a string by replacing special characters with underscore _ return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) def one_cycle(y1=0.0, y2=1.0, steps=100): # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 def colorstr(*input): # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string colors = { 'black': '\033[30m', # basic colors 'red': '\033[31m', 'green': '\033[32m', 'yellow': '\033[33m', 'blue': '\033[34m', 'magenta': '\033[35m', 'cyan': '\033[36m', 'white': '\033[37m', 'bright_black': '\033[90m', # bright colors 'bright_red': '\033[91m', 'bright_green': '\033[92m', 'bright_yellow': '\033[93m', 'bright_blue': '\033[94m', 'bright_magenta': '\033[95m', 'bright_cyan': '\033[96m', 'bright_white': '\033[97m', 'end': '\033[0m', # misc 'bold': '\033[1m', 'underline': '\033[4m'} return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] def labels_to_class_weights(labels, nc=80): # Get class weights (inverse frequency) from training labels if labels[0] is None: # no labels loaded return torch.Tensor() labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO classes = labels[:, 0].astype(int) # labels = [class xywh] weights = np.bincount(classes, minlength=nc) # occurrences per class # Prepend gridpoint count (for uCE training) # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start weights[weights == 0] = 1 # replace empty bins with 1 weights = 1 / weights # number of targets per class weights /= weights.sum() # normalize return torch.from_numpy(weights).float() def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): # Produces image weights based on class_weights and image contents # Usage: index = random.choices(range(n), weights=image_weights, k=1) # weighted image sample class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels]) return (class_weights.reshape(1, nc) * class_counts).sum(1) def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet return [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] def xyxy2xywh(x): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center y[:, 2] = x[:, 2] - x[:, 0] # width y[:, 3] = x[:, 3] - x[:, 1] # height return y def xywh2xyxy(x): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y return y def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = 
x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y return y def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right if clip: clip_coords(x, (h - eps, w - eps)) # warning: inplace clip y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center y[:, 2] = (x[:, 2] - x[:, 0]) / w # width y[:, 3] = (x[:, 3] - x[:, 1]) / h # height return y def xyn2xy(x, w=640, h=640, padw=0, padh=0): # Convert normalized segments into pixel segments, shape (n,2) y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = w * x[:, 0] + padw # top left x y[:, 1] = h * x[:, 1] + padh # top left y return y def segment2box(segment, width=640, height=640): # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) x, y = segment.T # segment xy inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) x, y, = x[inside], y[inside] return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy def segments2boxes(segments): # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) boxes = [] for s in segments: x, y = s.T # segment xy boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy return xyxy2xywh(np.array(boxes)) # cls, xywh def resample_segments(segments, n=1000): # Up-sample an (n,2) segment for i, s in enumerate(segments): s = np.concatenate((s, s[0:1, :]), axis=0) x = np.linspace(0, len(s) - 1, n) xp = np.arange(len(s)) segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy return segments def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): # Rescale coords (xyxy) from img1_shape to img0_shape if ratio_pad is None: # calculate from img0_shape gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding else: gain = ratio_pad[0][0] pad = ratio_pad[1] coords[:, [0, 2]] -= pad[0] # x padding coords[:, [1, 3]] -= pad[1] # y padding coords[:, :4] /= gain clip_coords(coords, img0_shape) return coords def clip_coords(boxes, shape): # Clip bounding xyxy bounding boxes to image shape (height, width) if isinstance(boxes, torch.Tensor): # faster individually boxes[:, 0].clamp_(0, shape[1]) # x1 boxes[:, 1].clamp_(0, shape[0]) # y1 boxes[:, 2].clamp_(0, shape[1]) # x2 boxes[:, 3].clamp_(0, shape[0]) # y2 else: # np.array (faster grouped) boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 def non_max_suppression( prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, labels=(), max_det=300 ): """Non-Maximum Suppression (NMS) on inference results to reject overlapping bounding boxes Returns: list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ bs = prediction.shape[0] # batch size nc = prediction.shape[2] - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates # Checks assert 0 <= conf_thres <= 1, f'Invalid 
Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' # Settings # min_wh = 2 # (pixels) minimum box width and height max_wh = 7680 # (pixels) maximum box width and height max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() time_limit = 0.3 + 0.03 * bs # seconds to quit after redundant = True # require redundant detections multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) merge = False # use merge-NMS t = time.time() output = [torch.zeros((0, 6), device=prediction.device)] * bs for xi, x in enumerate(prediction): # image index, image inference # Apply constraints # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height x = x[xc[xi]] # confidence # Cat apriori labels if autolabelling if labels and len(labels[xi]): lb = labels[xi] v = torch.zeros((len(lb), nc + 5), device=x.device) v[:, :4] = lb[:, 1:5] # box v[:, 4] = 1.0 # conf v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls x = torch.cat((x, v), 0) # If none remain process next image if not x.shape[0]: continue # Compute conf x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf # Box (center x, center y, width, height) to (x1, y1, x2, y2) box = xywh2xyxy(x[:, :4]) # Detections matrix nx6 (xyxy, conf, cls) if multi_label: i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) else: # best class only conf, j = x[:, 5:].max(1, keepdim=True) x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] # Filter by class if classes is not None: x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] # Apply finite constraint # if not torch.isfinite(x).all(): # x = x[torch.isfinite(x).all(1)] # Check shape n = x.shape[0] # number of boxes if not n: # no boxes continue elif n > max_nms: # excess boxes x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence # Batched NMS c = x[:, 5:6] * (0 if agnostic else max_wh) # classes boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS if i.shape[0] > max_det: # limit detections i = i[:max_det] if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix weights = iou * scores[None] # box weights x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes if redundant: i = i[iou.sum(1) > 1] # require redundancy output[xi] = x[i] if (time.time() - t) > time_limit: LOGGER.warning(f'WARNING: NMS time limit {time_limit:.3f}s exceeded') break # time limit exceeded return output def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() # Strip optimizer from 'f' to finalize training, optionally save as 's' x = torch.load(f, map_location=torch.device('cpu')) if x.get('ema'): x['model'] = x['ema'] # replace model with ema for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys x[k] = None x['epoch'] = -1 x['model'].half() # to FP16 for p in x['model'].parameters(): p.requires_grad = False torch.save(x, s or f) mb = os.path.getsize(s or f) / 1E6 # filesize LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") def print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): evolve_csv = save_dir / 'evolve.csv' 
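    # Append the latest results/hyperparameters to evolve.csv and rewrite hyp_evolve.yaml with the
    # best generation found so far (optionally syncing both files with a GCS bucket).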
evolve_yaml = save_dir / 'hyp_evolve.yaml' keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'eval/box_loss', 'eval/obj_loss', 'eval/cls_loss') + tuple(hyp.keys()) # [results + hyps] keys = tuple(x.strip() for x in keys) vals = results + tuple(hyp.values()) n = len(keys) # Download (optional) if bucket: url = f'gs://{bucket}/evolve.csv' if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local # Log to evolve.csv s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header with open(evolve_csv, 'a') as f: f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') # Save yaml with open(evolve_yaml, 'w') as f: data = pd.read_csv(evolve_csv) data = data.rename(columns=lambda x: x.strip()) # strip keys i = np.argmax(fitness(data.values[:, :4])) # generations = len(data) f.write( '# YOLOv5 Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' + f'# Last generation: {generations - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n' ) yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False) # Print to screen LOGGER.info( prefix + f'{generations} generations finished, current result:\n' + prefix + ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + prefix + ', '.join( f'{x:20.5g}' for x in vals ) + '\n\n' ) if bucket: os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload def apply_classifier(x, model, img, im0): # Apply a second stage classifier to YOLO outputs # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval() im0 = [im0] if isinstance(im0, np.ndarray) else im0 for i, d in enumerate(x): # per image if d is not None and len(d): d = d.clone() # Reshape and pad cutouts b = xyxy2xywh(d[:, :4]) # boxes b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad d[:, :4] = xywh2xyxy(b).long() # Rescale boxes from img_size to im0 size scale_coords(img.shape[2:], d[:, :4], im0[i].shape) # Classes pred_cls1 = d[:, 5].long() ims = [] for a in d: cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] im = cv2.resize(cutout, (224, 224)) # BGR im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 im /= 255 # 0 - 255 to 0.0 - 1.0 ims.append(im) pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections return x def increment_path(path, exist_ok=False, sep='', mkdir=False): # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. 
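    # e.g. if runs/exp already exists, probe runs/exp2, runs/exp3, ... and return the first unused path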
path = Path(path) # os-agnostic if path.exists() and not exist_ok: path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') # Method 1 for n in range(2, 9999): p = f'{path}{sep}{n}{suffix}' # increment path if not os.path.exists(p): # break path = Path(p) # Method 2 (deprecated) # dirs = glob.glob(f"{path}{sep}*") # similar paths # matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs] # i = [int(m.groups()[0]) for m in matches if m] # indices # n = max(i) + 1 if i else 2 # increment number # path = Path(f"{path}{sep}{n}{suffix}") # increment path if mkdir: path.mkdir(parents=True, exist_ok=True) # make directory return path # OpenCV Chinese-friendly functions ------------------------------------------------------------------------------------ imshow_ = cv2.imshow # copy to avoid recursion errors def imread(path, flags=cv2.IMREAD_COLOR): return cv2.imdecode(np.fromfile(path, np.uint8), flags) def imwrite(path, im): try: cv2.imencode(Path(path).suffix, im)[1].tofile(path) return True except Exception: return False def imshow(path, im): imshow_(path.encode('unicode_escape').decode(), im) cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine # Variables ------------------------------------------------------------------------------------------------------------ NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm ================================================ FILE: module/detect/utils/google_app_engine/Dockerfile ================================================ FROM gcr.io/google-appengine/python # Create a virtualenv for dependencies. This isolates these packages from # system-level packages. # Use -p python3 or -p python3.7 to select python version. Default is version 2. RUN virtualenv /env -p python3 # Setting these environment variables are the same as running # source /env/bin/activate. ENV VIRTUAL_ENV /env ENV PATH /env/bin:$PATH RUN apt-get update && apt-get install -y python-opencv # Copy the application's requirements.txt and run pip to install all # dependencies into the virtualenv. ADD requirements.txt /app/requirements.txt RUN pip install -r /app/requirements.txt # Add the application source code. ADD . /app # Run a WSGI server to serve the application. gunicorn must be declared as # a dependency in requirements.txt. 
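# Note: on the App Engine flexible environment the PORT environment variable is provided at runtime.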
CMD gunicorn -b :$PORT main:app ================================================ FILE: module/detect/utils/google_app_engine/additional_requirements.txt ================================================ # add these requirements in your app on top of the existing ones pip==21.1 Flask==1.0.2 gunicorn==19.9.0 ================================================ FILE: module/detect/utils/google_app_engine/app.yaml ================================================ runtime: custom env: flex service: yolov5app liveness_check: initial_delay_sec: 600 manual_scaling: instances: 1 resources: cpu: 1 memory_gb: 4 disk_size_gb: 20 ================================================ FILE: module/detect/utils/loggers/__init__.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Logging utils """ import os import warnings import pkg_resources as pkg import torch from torch.utils.tensorboard import SummaryWriter from utils.general import colorstr, cv2, emojis from utils.loggers.wandb.wandb_utils import WandbLogger from utils.plots import plot_images, plot_results from utils.torch_utils import de_parallel LOGGERS = ('csv', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases RANK = int(os.getenv('RANK', -1)) try: import wandb assert hasattr(wandb, '__version__') # verify package import not local dir if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}: try: wandb_login_success = wandb.login(timeout=30) except wandb.errors.UsageError: # known non-TTY terminal issue wandb_login_success = False if not wandb_login_success: wandb = None except (ImportError, AssertionError): wandb = None class Loggers(): # YOLOv5 Loggers class def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): self.save_dir = save_dir self.weights = weights self.opt = opt self.hyp = hyp self.logger = logger # for printing results to console self.include = include self.keys = [ 'train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics 'eval/box_loss', 'eval/obj_loss', 'eval/cls_loss', # eval loss 'x/lr0', 'x/lr1', 'x/lr2'] # params self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95'] for k in LOGGERS: setattr(self, k, None) # init empty logger dictionary self.csv = True # always log to csv # Message if not wandb: prefix = colorstr('Weights & Biases: ') s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)" self.logger.info(emojis(s)) # TensorBoard s = self.save_dir if 'tb' in self.include and not self.opt.evolve: prefix = colorstr('TensorBoard: ') self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/") self.tb = SummaryWriter(str(s)) # W&B if wandb and 'wandb' in self.include: wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None self.opt.hyp = self.hyp # add hyperparameters self.wandb = WandbLogger(self.opt, run_id) # temp warn. because nested artifacts not supported after 0.12.10 if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): self.logger.warning( "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." 
) else: self.wandb = None def on_train_start(self): # Callback runs on train start pass def on_pretrain_routine_end(self): # Callback runs on pre-train routine end paths = self.save_dir.glob('*labels*.jpg') # training labels if self.wandb: self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): # Callback runs on train batch end if plots: if ni == 0: if self.tb and not self.opt.sync_bn: # --sync known issue https://github.com/ultralytics/yolov5/issues/3754 with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) if ni < 3: f = self.save_dir / f'train_batch{ni}.jpg' # filename plot_images(imgs, targets, paths, f) if self.wandb and ni == 10: files = sorted(self.save_dir.glob('train*.jpg')) self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) def on_train_epoch_end(self, epoch): # Callback runs on train epoch end if self.wandb: self.wandb.current_epoch = epoch + 1 def on_val_image_end(self, pred, predn, path, names, im): # Callback runs on eval image end if self.wandb: self.wandb.val_one_image(pred, predn, path, names, im) def on_val_end(self): # Callback runs on eval end if self.wandb: files = sorted(self.save_dir.glob('eval*.jpg')) self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): # Callback runs at the end of each fit (train+eval) epoch x = dict(zip(self.keys, vals)) if self.csv: file = self.save_dir / 'results.csv' n = len(x) + 1 # number of cols s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header with open(file, 'a') as f: f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') if self.tb: for k, v in x.items(): self.tb.add_scalar(k, v, epoch) if self.wandb: if best_fitness == fi: best_results = [epoch] + vals[3:7] for i, name in enumerate(self.best_keys): self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary self.wandb.log(x) self.wandb.end_epoch(best_result=best_fitness == fi) def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): # Callback runs on model save event if self.wandb: if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) def on_train_end(self, last, best, plots, epoch, results): # Callback runs on training end if plots: plot_results(file=self.save_dir / 'results.csv') # save results.png files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}") if self.tb: for f in files: self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') if self.wandb: self.wandb.log(dict(zip(self.keys[3:10], results))) self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) # Calling wandb.log. 
TODO: Refactor this into WandbLogger.log_model if not self.opt.evolve: wandb.log_artifact( str(best if best.exists() else last), type='model', name=f'run_{self.wandb.wandb_run.id}_model', aliases=['latest', 'best', 'stripped'] ) self.wandb.finish_run() def on_params_update(self, params): # Update hyperparams or configs of the experiment # params: A dict containing {param: value} pairs if self.wandb: self.wandb.wandb_run.config.update(params, allow_val_change=True) ================================================ FILE: module/detect/utils/loggers/wandb/README.md ================================================ 📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. UPDATED 29 September 2021. - [About Weights & Biases](#about-weights-&-biases) - [First-Time Setup](#first-time-setup) - [Viewing runs](#viewing-runs) - [Disabling wandb](#disabling-wandb) - [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) - [Reports: Share your work with the world!](#reports) ## About Weights & Biases Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models — architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions. Used by top researchers including teams at OpenAI, Lyft, Github, and MILA, W&B is part of the new standard of best practices for machine learning. How W&B can help you optimize your machine learning workflows: - [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time - [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically - [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization - [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators - [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently - [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models ## First-Time Setup
When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once; it is then remembered on the same device.

W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be given a unique run **name** within that project as project/name. You can also set your project and run name manually:

```shell
$ python trainfd.py --project ... --name ...
```

YOLOv5 notebook examples are available: Open In Colab, Open In Kaggle.
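If you prefer to authenticate from Python (for example inside a notebook) rather than at the CLI prompt, a minimal sketch using wandb's standard `login`/`init` calls might look like the following; the project and run names are placeholders, not values this repository requires:

```python
import os
import wandb

# wandb.login() also picks up the key cached by `wandb login` or the
# WANDB_API_KEY environment variable, so passing it explicitly is optional.
wandb.login(key=os.environ.get("WANDB_API_KEY"))

# Subsequent runs are grouped under the chosen project, mirroring
# `--project ... --name ...` on the command line.
run = wandb.init(project="YOLOv5", name="my-experiment")
run.finish()
```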
## Viewing Runs
Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in real time. All important information is logged:

- Training & Validation losses
- Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95
- Learning Rate over time
- A bounding box debugging panel, showing the training progress over time
- GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage**
- System: Disk I/O, CPU utilization, RAM memory usage
- Your trained model as W&B Artifact
- Environment: OS and Python types, Git repository and state, **training command**

*Weights & Biases dashboard (screenshot)*

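Beyond the dashboard, the same logged scalars can be pulled back programmatically through wandb's public API. Below is a minimal sketch; the `entity/project/run_id` path is a placeholder you would take from your own run URL, and the metric keys shown are the ones written by this repository's `Loggers` class:

```python
import wandb

api = wandb.Api()
# Replace with your own entity/project/run id, e.g. copied from the run URL.
run = api.run("my-entity/YOLOv5/abc123")

# history() returns a pandas DataFrame of the logged scalars per step.
df = run.history(keys=["train/box_loss", "metrics/mAP_0.5", "metrics/mAP_0.5:0.95"])
print(df.tail())

# Best-epoch values are stored in the run summary under the 'best/*' keys.
print(run.summary.get("best/mAP_0.5"))
```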
## Disabling wandb

- Running `wandb disabled` inside the training directory disables W&B logging; subsequent training creates no wandb run. ![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png)
- To enable wandb again, run `wandb online`. ![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png)

## Advanced Usage

You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started.

### 1: TrainFD and Log Evaluation simultaneously

This is an extension of the previous section: it uploads the dataset and then starts training, and it also logs an Evaluation Table. The Evaluation Table compares your predictions and ground truths across the validation set for each epoch. It uses references to the already-uploaded dataset, so no image is uploaded from your system more than once.
**Usage:** `$ python trainfd.py --upload_data eval`

![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png)

### 2: Visualize and Version Datasets

Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This generates a `{dataset}_wandb.yaml` file which can be used to train from the dataset artifact.
**Usage:** `$ python utils/loggers/wandb/log_dataset.py --project ... --name ... --data ...`

![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)

### 3: TrainFD using dataset artifact

When you upload a dataset as described in the first section, you get a new config file with `_wandb` appended to its name. This file contains the information needed to train a model directly from the dataset artifact, and training this way also logs the evaluation Table.
**Usage:** `$ python trainfd.py --data {data}_wandb.yaml`

![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)

### 4: Save model checkpoints as artifacts

To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval in epochs. You can also log both the dataset and model checkpoints simultaneously. If `--save_period` is not passed, only the final model is logged.
**Usage:** `$ python trainfd.py --save_period 1`

![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png)

### 5: Resume runs from checkpoint artifacts

Any run can be resumed using artifacts if the `--resume` argument starts with the `wandb-artifact://` prefix followed by the run path, i.e. `wandb-artifact://username/project/runid`. This doesn't require the model checkpoint to be present on the local system.
**Usage:** `$ python trainfd.py --resume wandb-artifact://{run_path}`

![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)

### 6: Resume runs from dataset artifact & checkpoint artifacts

Local dataset or model checkpoints are not required, so this can be used to resume runs directly on a different device. The syntax is the same as in the previous section, but you'll need to log both the dataset and the model checkpoints as artifacts, i.e. either set `--upload_dataset` or train from a `_wandb.yaml` file, and also set `--save_period`.
**Usage:** `$ python trainfd.py --resume wandb-artifact://{run_path}`

![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)

## Reports

W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)).

## Environments

YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):

- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab, Open In Kaggle
- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)
- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart)

## Status

![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)

If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([trainfd.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([eval.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.
================================================ FILE: module/detect/utils/loggers/wandb/__init__.py ================================================ ================================================ FILE: module/detect/utils/loggers/wandb/log_dataset.py ================================================ import argparse from wandb_utils import WandbLogger from utils.general import LOGGER WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' def create_dataset_artifact(opt): logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused if not logger.wandb: LOGGER.info("install wandb using `pip install wandb` to log the dataset") if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') parser.add_argument('--entity', default=None, help='W&B entity') parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') opt = parser.parse_args() opt.resume = False # Explicitly disallow resume check for dataset upload job create_dataset_artifact(opt) ================================================ FILE: module/detect/utils/loggers/wandb/sweep.py ================================================ import sys from pathlib import Path import wandb FILE = Path(__file__).resolve() ROOT = FILE.parents[3] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH from train import parse_opt, train from utils.callbacks import Callbacks from utils.general import increment_path from utils.torch_utils import select_device def sweep(): wandb.init() # Get hyp dict from sweep agent. Copy because train() modifies parameters which confused wandb. hyp_dict = vars(wandb.config).get("_items").copy() # Workaround: get necessary opt args opt = parse_opt(known=True) opt.batch_size = hyp_dict.get("batch_size") opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) opt.epochs = hyp_dict.get("epochs") opt.nosave = True opt.data = hyp_dict.get("data") opt.weights = str(opt.weights) opt.cfg = str(opt.cfg) opt.data = str(opt.data) opt.hyp = str(opt.hyp) opt.project = str(opt.project) device = select_device(opt.device, batch_size=opt.batch_size) # train train(hyp_dict, opt, device, callbacks=Callbacks()) if __name__ == "__main__": sweep() ================================================ FILE: module/detect/utils/loggers/wandb/sweep.yaml ================================================ # Hyperparameters for training # To set range- # Provide min and max values as: # parameter: # # min: scalar # max: scalar # OR # # Set a specific list of search space- # parameter: # values: [scalar1, scalar2, scalar3...] 
# # You can use grid, bayesian and hyperopt search strategy # For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration program: utils/loggers/wandb/sweep.py method: random metric: name: metrics/mAP_0.5 goal: maximize parameters: # hyperparameters: set either min, max range or values list data: value: "data/coco128.yaml" batch_size: values: [64] epochs: values: [10] lr0: distribution: uniform min: 1e-5 max: 1e-1 lrf: distribution: uniform min: 0.01 max: 1.0 momentum: distribution: uniform min: 0.6 max: 0.98 weight_decay: distribution: uniform min: 0.0 max: 0.001 warmup_epochs: distribution: uniform min: 0.0 max: 5.0 warmup_momentum: distribution: uniform min: 0.0 max: 0.95 warmup_bias_lr: distribution: uniform min: 0.0 max: 0.2 box: distribution: uniform min: 0.02 max: 0.2 cls: distribution: uniform min: 0.2 max: 4.0 cls_pw: distribution: uniform min: 0.5 max: 2.0 obj: distribution: uniform min: 0.2 max: 4.0 obj_pw: distribution: uniform min: 0.5 max: 2.0 iou_t: distribution: uniform min: 0.1 max: 0.7 anchor_t: distribution: uniform min: 2.0 max: 8.0 fl_gamma: distribution: uniform min: 0.0 max: 4.0 hsv_h: distribution: uniform min: 0.0 max: 0.1 hsv_s: distribution: uniform min: 0.0 max: 0.9 hsv_v: distribution: uniform min: 0.0 max: 0.9 degrees: distribution: uniform min: 0.0 max: 45.0 translate: distribution: uniform min: 0.0 max: 0.9 scale: distribution: uniform min: 0.0 max: 0.9 shear: distribution: uniform min: 0.0 max: 10.0 perspective: distribution: uniform min: 0.0 max: 0.001 flipud: distribution: uniform min: 0.0 max: 1.0 fliplr: distribution: uniform min: 0.0 max: 1.0 mosaic: distribution: uniform min: 0.0 max: 1.0 mixup: distribution: uniform min: 0.0 max: 1.0 copy_paste: distribution: uniform min: 0.0 max: 1.0 ================================================ FILE: module/detect/utils/loggers/wandb/wandb_utils.py ================================================ """Utilities and tools for tracking runs with Weights & Biases.""" import logging import os import sys from contextlib import contextmanager from pathlib import Path from typing import Dict import yaml from tqdm import tqdm FILE = Path(__file__).resolve() ROOT = FILE.parents[3] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH from utils.dataloaders import LoadImagesAndLabels, img2label_paths from utils.general import LOGGER, check_dataset, check_file try: import wandb assert hasattr(wandb, '__version__') # verify package import not local dir except (ImportError, AssertionError): wandb = None RANK = int(os.getenv('RANK', -1)) WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): return from_string[len(prefix):] def check_wandb_config_file(data_config_file): wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path if Path(wandb_config).is_file(): return wandb_config return data_config_file def check_wandb_dataset(data_file): is_trainset_wandb_artifact = False is_valset_wandb_artifact = False if check_file(data_file) and data_file.endswith('.yaml'): with open(data_file, errors='ignore') as f: data_dict = yaml.safe_load(f) is_trainset_wandb_artifact = isinstance( data_dict['train'], str ) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) is_valset_wandb_artifact = isinstance( data_dict['eval'], str ) and data_dict['eval'].startswith(WANDB_ARTIFACT_PREFIX) if is_trainset_wandb_artifact or is_valset_wandb_artifact: return data_dict else: return 
check_dataset(data_file) def get_run_info(run_path): run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) run_id = run_path.stem project = run_path.parent.stem entity = run_path.parent.parent.stem model_artifact_name = 'run_' + run_id + '_model' return entity, project, run_id, model_artifact_name def check_wandb_resume(opt): process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None if isinstance(opt.resume, str): if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): if RANK not in [-1, 0]: # For resuming DDP runs entity, project, run_id, model_artifact_name = get_run_info(opt.resume) api = wandb.Api() artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') modeldir = artifact.download() opt.weights = str(Path(modeldir) / "last.pt") return True return None def process_wandb_config_ddp_mode(opt): with open(check_file(opt.data), errors='ignore') as f: data_dict = yaml.safe_load(f) # data dict train_dir, val_dir = None, None if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): api = wandb.Api() train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) train_dir = train_artifact.download() train_path = Path(train_dir) / 'data/images/' data_dict['train'] = str(train_path) if isinstance(data_dict['eval'], str) and data_dict['eval'].startswith(WANDB_ARTIFACT_PREFIX): api = wandb.Api() val_artifact = api.artifact(remove_prefix(data_dict['eval']) + ':' + opt.artifact_alias) val_dir = val_artifact.download() val_path = Path(val_dir) / 'data/images/' data_dict['eval'] = str(val_path) if train_dir or val_dir: ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') with open(ddp_data_path, 'w') as f: yaml.safe_dump(data_dict, f) opt.data = ddp_data_path class WandbLogger(): """Log training runs, datasets, models, and predictions to Weights & Biases. This logger sends information to W&B at wandb.ai. By default, this information includes hyperparameters, system configuration and metrics, model metrics, and basic data metrics and analyses. By providing additional command line arguments to train.py, datasets, models and predictions can also be logged. 
For more on how this logger is used, see the Weights & Biases documentation: https://docs.wandb.com/guides/integrations/yolov5 """ def __init__(self, opt, run_id=None, job_type='Training'): """ - Initialize WandbLogger instance - Upload dataset if opt.upload_dataset is True - Setup trainig processes if job_type is 'Training' arguments: opt (namespace) -- Commandline arguments for this run run_id (str) -- Run ID of W&B run to be resumed job_type (str) -- To set the job_type for this run """ # Pre-training routine -- self.job_type = job_type self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run self.val_artifact, self.train_artifact = None, None self.train_artifact_path, self.val_artifact_path = None, None self.result_artifact = None self.val_table, self.result_table = None, None self.bbox_media_panel_images = [] self.val_table_path_map = None self.max_imgs_to_log = 16 self.wandb_artifact_data_dict = None self.data_dict = None # It's more elegant to stick to 1 wandb.init call, # but useful config data is overwritten in the WandbLogger's wandb.init call if isinstance(opt.resume, str): # checks resume from artifact if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): entity, project, run_id, model_artifact_name = get_run_info(opt.resume) model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name assert wandb, 'install wandb to resume wandb runs' # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config self.wandb_run = wandb.init( id=run_id, project=project, entity=entity, resume='allow', allow_val_change=True ) opt.resume = model_artifact_name elif self.wandb: self.wandb_run = wandb.init( config=opt, resume="allow", project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, entity=opt.entity, name=opt.name if opt.name != 'exp' else None, job_type=job_type, id=run_id, allow_val_change=True ) if not wandb.run else wandb.run if self.wandb_run: if self.job_type == 'Training': if opt.upload_dataset: if not opt.resume: self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) if opt.resume: # resume from artifact if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX): self.data_dict = dict(self.wandb_run.config.data_dict) else: # local resume self.data_dict = check_wandb_dataset(opt.data) else: self.data_dict = check_wandb_dataset(opt.data) self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True) self.setup_training(opt) if self.job_type == 'Dataset Creation': self.wandb_run.config.update({"upload_dataset": True}) self.data_dict = self.check_and_upload_dataset(opt) def check_and_upload_dataset(self, opt): """ Check if the dataset format is compatible and upload it as W&B artifact arguments: opt (namespace)-- Commandline arguments for current run returns: Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. 
""" assert wandb, 'Install wandb to upload dataset' config_path = self.log_dataset_artifact( opt.data, opt.single_cls, 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem ) with open(config_path, errors='ignore') as f: wandb_data_dict = yaml.safe_load(f) return wandb_data_dict def setup_training(self, opt): """ Setup the necessary processes for training YOLO models: - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded - Setup log_dict, initialize bbox_interval arguments: opt (namespace) -- commandline arguments for this run """ self.log_dict, self.current_epoch = {}, 0 self.bbox_interval = opt.bbox_interval if isinstance(opt.resume, str): modeldir, _ = self.download_model_artifact(opt) if modeldir: self.weights = Path(modeldir) / "last.pt" config = self.wandb_run.config opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( self.weights ), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ config.hyp, config.imgsz data_dict = self.data_dict if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download self.train_artifact_path, self.train_artifact = self.download_dataset_artifact( data_dict.get('train'), opt.artifact_alias ) self.val_artifact_path, self.val_artifact = self.download_dataset_artifact( data_dict.get('eval'), opt.artifact_alias ) if self.train_artifact_path is not None: train_path = Path(self.train_artifact_path) / 'data/images/' data_dict['train'] = str(train_path) if self.val_artifact_path is not None: val_path = Path(self.val_artifact_path) / 'data/images/' data_dict['eval'] = str(val_path) if self.val_artifact is not None: self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") columns = ["epoch", "id", "ground truth", "prediction"] columns.extend(self.data_dict['names']) self.result_table = wandb.Table(columns) self.val_table = self.val_artifact.get("eval") if self.val_table_path_map is None: self.map_val_table_path() if opt.bbox_interval == -1: self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 if opt.evolve or opt.noplots: self.bbox_interval = opt.bbox_interval = opt.epochs + 1 # disable bbox_interval train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None # Update the the data_dict to point to local artifacts dir if train_from_artifact: self.data_dict = data_dict def download_dataset_artifact(self, path, alias): """ download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX arguments: path -- path of the dataset to be used for training alias (str)-- alias of the artifact to be download/used for training returns: (str, wandb.Artifact) -- path of the downladed dataset and it's corresponding artifact object if dataset is found otherwise returns (None, None) """ if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" datadir = dataset_artifact.download() return datadir, dataset_artifact return None, None def download_model_artifact(self, opt): """ download the model checkpoint artifact if the 
resume path starts with WANDB_ARTIFACT_PREFIX arguments: opt (namespace) -- Commandline arguments for this run """ if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' modeldir = model_artifact.download() # epochs_trained = model_artifact.metadata.get('epochs_trained') total_epochs = model_artifact.metadata.get('total_epochs') is_finished = total_epochs is None assert not is_finished, 'training is finished, can only resume incomplete runs.' return modeldir, model_artifact return None, None def log_model(self, path, opt, epoch, fitness_score, best_model=False): """ Log the model checkpoint as W&B artifact arguments: path (Path) -- Path of directory containing the checkpoints opt (namespace) -- Command line arguments for this run epoch (int) -- Current epoch number fitness_score (float) -- fitness score for current epoch best_model (boolean) -- Boolean representing if the current checkpoint is the best yet. """ model_artifact = wandb.Artifact( 'run_' + wandb.run.id + '_model', type='model', metadata={ 'original_url': str(path), 'epochs_trained': epoch + 1, 'save period': opt.save_period, 'project': opt.project, 'total_epochs': opt.epochs, 'fitness_score': fitness_score} ) model_artifact.add_file(str(path / 'last.pt'), name='last.pt') wandb.log_artifact( model_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''] ) LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): """ Log the dataset as W&B artifact and return the new data file with W&B links arguments: data_file (str) -- the .yaml file with information about the dataset like - path, classes etc. single_class (boolean) -- train multi-class data as single-class project (str) -- project name. Used to construct the artifact path overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new file with _wandb postfix. Eg -> data_wandb.yaml returns: the new .yaml file with artifact links. 
it can be used to start training directly from artifacts """ upload_dataset = self.wandb_run.config.upload_dataset log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'eval' self.data_dict = check_dataset(data_file) # parse and check data = dict(self.data_dict) nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) names = {k: v for k, v in enumerate(names)} # to index dictionary # log train set if not log_val_only: self.train_artifact = self.create_dataset_table( LoadImagesAndLabels(data['train'], rect=True, batch_size=1), names, name='train' ) if data.get('train') else None if data.get('train'): data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') self.val_artifact = self.create_dataset_table( LoadImagesAndLabels(data['eval'], rect=True, batch_size=1), names, name='eval' ) if data.get('eval') else None if data.get('eval'): data['eval'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'eval') path = Path(data_file) # create a _wandb.yaml file with artifacts links if both train and test set are logged if not log_val_only: path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml' # updated data.yaml path path = ROOT / 'data' / path data.pop('download', None) data.pop('path', None) with open(path, 'w') as f: yaml.safe_dump(data, f) LOGGER.info(f"Created dataset config file {path}") if self.job_type == 'Training': # builds correct artifact pipeline graph if not log_val_only: self.wandb_run.log_artifact( self.train_artifact ) # calling use_artifact downloads the dataset. NOT NEEDED! self.wandb_run.use_artifact(self.val_artifact) self.val_artifact.wait() self.val_table = self.val_artifact.get('eval') self.map_val_table_path() else: self.wandb_run.log_artifact(self.train_artifact) self.wandb_run.log_artifact(self.val_artifact) return path def map_val_table_path(self): """ Map the validation dataset Table like name of file -> it's id in the W&B Table. Useful for - referencing artifacts for evaluation. """ self.val_table_path_map = {} LOGGER.info("Mapping dataset") for i, data in enumerate(tqdm(self.val_table.data)): self.val_table_path_map[data[3]] = data[0] def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'): """ Create and return W&B artifact containing W&B Table of the dataset. 
arguments: dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table class_to_id -- hash map that maps class ids to labels name -- name of the artifact returns: dataset artifact to be logged or used """ # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging artifact = wandb.Artifact(name=name, type="dataset") img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None img_files = tqdm(dataset.im_files) if not img_files else img_files for img_file in img_files: if Path(img_file).is_dir(): artifact.add_dir(img_file, name='data/images') labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) artifact.add_dir(labels_path, name='data/labels') else: artifact.add_file(img_file, name='data/images/' + Path(img_file).name) label_file = Path(img2label_paths([img_file])[0]) artifact.add_file( str(label_file), name='data/labels/' + label_file.name ) if label_file.exists() else None table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): box_data, img_classes = [], {} for cls, *xywh in labels[:, 1:].tolist(): cls = int(cls) box_data.append( { "position": { "middle": [xywh[0], xywh[1]], "width": xywh[2], "height": xywh[3]}, "class_id": cls, "box_caption": "%s" % (class_to_id[cls])} ) img_classes[cls] = class_to_id[cls] boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space table.add_data( si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()), Path(paths).name ) artifact.add(table, name) return artifact def log_training_progress(self, predn, path, names): """ Build evaluation Table. Uses reference from validation dataset table. arguments: predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] path (str): local path of the current evaluation image names (dict(int, str)): hash map that maps class ids to labels """ class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) box_data = [] avg_conf_per_class = [0] * len(self.data_dict['names']) pred_class_count = {} for *xyxy, conf, cls in predn.tolist(): if conf >= 0.25: cls = int(cls) box_data.append( { "position": { "minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": cls, "box_caption": f"{names[cls]} {conf:.3f}", "scores": { "class_score": conf}, "domain": "pixel"} ) avg_conf_per_class[cls] += conf if cls in pred_class_count: pred_class_count[cls] += 1 else: pred_class_count[cls] = 1 for pred_class in pred_class_count.keys(): avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space id = self.val_table_path_map[Path(path).name] self.result_table.add_data( self.current_epoch, id, self.val_table.data[id][1], wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), *avg_conf_per_class ) def val_one_image(self, pred, predn, path, names, im): """ Log validation data for one image. 
updates the result Table if validation dataset is uploaded and log bbox media panel arguments: pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] path (str): local path of the current evaluation image """ if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact self.log_training_progress(predn, path, names) if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: if self.current_epoch % self.bbox_interval == 0: box_data = [{ "position": { "minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": f"{names[int(cls)]} {conf:.3f}", "scores": { "class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) def log(self, log_dict): """ save the metrics to the logging dictionary arguments: log_dict (Dict) -- metrics/media to be logged in current step """ if self.wandb_run: for key, value in log_dict.items(): self.log_dict[key] = value def end_epoch(self, best_result=False): """ commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. arguments: best_result (boolean): Boolean representing if the result of this evaluation is best or not """ if self.wandb_run: with all_logging_disabled(): if self.bbox_media_panel_images: self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images try: wandb.log(self.log_dict) except BaseException as e: LOGGER.info( f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}" ) self.wandb_run.finish() self.wandb_run = None self.log_dict = {} self.bbox_media_panel_images = [] if self.result_artifact: self.result_artifact.add(self.result_table, 'result') wandb.log_artifact( self.result_artifact, aliases=[ 'latest', 'last', 'epoch ' + str(self.current_epoch), ('best' if best_result else '')] ) wandb.log({"evaluation": self.result_table}) columns = ["epoch", "id", "ground truth", "prediction"] columns.extend(self.data_dict['names']) self.result_table = wandb.Table(columns) self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): """ Log metrics if any and finish the current W&B run """ if self.wandb_run: if self.log_dict: with all_logging_disabled(): wandb.log(self.log_dict) wandb.run.finish() @contextmanager def all_logging_disabled(highest_level=logging.CRITICAL): """ source - https://gist.github.com/simon-weber/7853144 A context manager that will prevent any logging messages triggered during the body from being processed. :param highest_level: the maximum logging level in use. This would only need to be changed if a custom level greater than CRITICAL is defined. 
""" previous_level = logging.root.manager.disable logging.disable(highest_level) try: yield finally: logging.disable(previous_level) ================================================ FILE: module/detect/utils/loss.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Loss functions """ import torch import torch.nn as nn from utils.metrics import bbox_iou from utils.torch_utils import de_parallel def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 # return positive, negative label smoothing BCE targets return 1.0 - 0.5 * eps, 0.5 * eps class BCEBlurWithLogitsLoss(nn.Module): # BCEwithLogitLoss() with reduced missing label effects. def __init__(self, alpha=0.05): super().__init__() self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() self.alpha = alpha def forward(self, pred, true): loss = self.loss_fcn(pred, true) pred = torch.sigmoid(pred) # prob from logits dx = pred - true # reduce only missing label effects # dx = (pred - true).abs() # reduce missing label and false label effects alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) loss *= alpha_factor return loss.mean() class FocalLoss(nn.Module): # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): super().__init__() self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() self.gamma = gamma self.alpha = alpha self.reduction = loss_fcn.reduction self.loss_fcn.reduction = 'none' # required to apply FL to each element def forward(self, pred, true): loss = self.loss_fcn(pred, true) # p_t = torch.exp(-loss) # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py pred_prob = torch.sigmoid(pred) # prob from logits p_t = true * pred_prob + (1 - true) * (1 - pred_prob) alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) modulating_factor = (1.0 - p_t) ** self.gamma loss *= alpha_factor * modulating_factor if self.reduction == 'mean': return loss.mean() elif self.reduction == 'sum': return loss.sum() else: # 'none' return loss class QFocalLoss(nn.Module): # Wraps Quality focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): super().__init__() self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() self.gamma = gamma self.alpha = alpha self.reduction = loss_fcn.reduction self.loss_fcn.reduction = 'none' # required to apply FL to each element def forward(self, pred, true): loss = self.loss_fcn(pred, true) pred_prob = torch.sigmoid(pred) # prob from logits alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) modulating_factor = torch.abs(true - pred_prob) ** self.gamma loss *= alpha_factor * modulating_factor if self.reduction == 'mean': return loss.mean() elif self.reduction == 'sum': return loss.sum() else: # 'none' return loss class ComputeLoss: sort_obj_iou = False # Compute losses def __init__(self, model, autobalance=False): device = next(model.parameters()).device # get model device h = model.hyp # hyperparameters # Define criteria BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets # Focal loss g = h['fl_gamma'] # focal loss gamma if g > 0: BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) m = de_parallel(model).model[-1] # Detect() module self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance self.na = m.na # number of anchors self.nc = m.nc # number of classes self.nl = m.nl # number of layers self.anchors = m.anchors self.device = device def __call__(self, p, targets): # predictions, targets lcls = torch.zeros(1, device=self.device) # class loss lbox = torch.zeros(1, device=self.device) # box loss lobj = torch.zeros(1, device=self.device) # object loss tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets # Losses for i, pi in enumerate(p): # layer index, layer predictions b, a, gj, gi = indices[i] # image, anchor, gridy, gridx tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj n = b.shape[0] # number of targets if n: # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions # Regression pxy = pxy.sigmoid() * 2 - 0.5 pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] pbox = torch.cat((pxy, pwh), 1) # predicted box iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) lbox += (1.0 - iou).mean() # iou loss # Objectness iou = iou.detach().clamp(0).type(tobj.dtype) if self.sort_obj_iou: j = iou.argsort() b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] if self.gr < 1: iou = (1.0 - self.gr) + self.gr * iou tobj[b, a, gj, gi] = iou # iou ratio # Classification if self.nc > 1: # cls loss (only if multiple classes) t = torch.full_like(pcls, self.cn, device=self.device) # targets t[range(n), tcls[i]] = self.cp lcls += self.BCEcls(pcls, t) # BCE # Append targets to text file # with open('targets.txt', 'a') as file: # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] obji = self.BCEobj(pi[..., 4], tobj) lobj += obji * self.balance[i] # obj loss if 
self.autobalance: self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() if self.autobalance: self.balance = [x / self.balance[self.ssi] for x in self.balance] lbox *= self.hyp['box'] lobj *= self.hyp['obj'] lcls *= self.hyp['cls'] bs = tobj.shape[0] # batch size return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() def build_targets(self, p, targets): # Build targets for compute_loss(), input targets(image,class,x,y,w,h) na, nt = self.na, targets.shape[0] # number of anchors, targets tcls, tbox, indices, anch = [], [], [], [] gain = torch.ones(7, device=self.device) # normalized to gridspace gain ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None]), 2) # append anchor indices g = 0.5 # bias off = torch.tensor( [ [0, 0], [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm ], device=self.device).float() * g # offsets for i in range(self.nl): anchors, shape = self.anchors[i], p[i].shape gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain # Match targets to anchors t = targets * gain # shape(3,n,7) if nt: # Matches r = t[..., 4:6] / anchors[:, None] # wh ratio j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) t = t[j] # filter # Offsets gxy = t[:, 2:4] # grid xy gxi = gain[[2, 3]] - gxy # inverse j, k = ((gxy % 1 < g) & (gxy > 1)).T l, m = ((gxi % 1 < g) & (gxi > 1)).T j = torch.stack((torch.ones_like(j), j, k, l, m)) t = t.repeat((5, 1, 1))[j] offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] else: t = targets[0] offsets = 0 # Define bc, gxy, gwh, a = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors a, (b, c) = a.long().view(-1), bc.long().T # anchors, image, class gij = (gxy - offsets).long() gi, gj = gij.T # grid indices # Append indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid tbox.append(torch.cat((gxy - gij, gwh), 1)) # box anch.append(anchors[a]) # anchors tcls.append(c) # class return tcls, tbox, indices, anch ================================================ FILE: module/detect/utils/metrics.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Model validation metrics """ import math import warnings from pathlib import Path import matplotlib.pyplot as plt import numpy as np import torch def fitness(x): # Model fitness as a weighted combination of metrics w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] return (x[:, :4] * w).sum(1) def smooth(y, f=0.05): # Box filter of fraction f nf = round(len(y) * f * 2) // 2 + 1 # number of filter elements (must be odd) p = np.ones(nf // 2) # ones padding yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. # Arguments tp: True positives (nparray, nx1 or nx10). conf: Objectness value from 0-1 (nparray). pred_cls: Predicted object classes (nparray). target_cls: True object classes (nparray). 
plot: Plot precision-recall curve at mAP@0.5 save_dir: Plot save directory # Returns The average precision as computed in py-faster-rcnn. """ # Sort by objectness i = np.argsort(-conf) tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] # Find unique classes unique_classes, nt = np.unique(target_cls, return_counts=True) nc = unique_classes.shape[0] # number of classes, number of detections # Create Precision-Recall curve and compute AP for each class px, py = np.linspace(0, 1, 1000), [] # for plotting ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) for ci, c in enumerate(unique_classes): i = pred_cls == c n_l = nt[ci] # number of labels n_p = i.sum() # number of predictions if n_p == 0 or n_l == 0: continue # Accumulate FPs and TPs fpc = (1 - tp[i]).cumsum(0) tpc = tp[i].cumsum(0) # Recall recall = tpc / (n_l + eps) # recall curve r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases # Precision precision = tpc / (tpc + fpc) # precision curve p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score # AP from recall-precision curve for j in range(tp.shape[1]): ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) if plot and j == 0: py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 # Compute F1 (harmonic mean of precision and recall) f1 = 2 * p * r / (p + r + eps) names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data names = dict(enumerate(names)) # to dict if plot: plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') i = smooth(f1.mean(0), 0.1).argmax() # max F1 index p, r, f1 = p[:, i], r[:, i], f1[:, i] tp = (r * nt).round() # true positives fp = (tp / (p + eps) - tp).round() # false positives return tp, fp, p, r, f1, ap, unique_classes.astype(int) def compute_ap(recall, precision): """ Compute the average precision, given the recall and precision curves # Arguments recall: The recall curve (list) precision: The precision curve (list) # Returns Average precision, precision curve, recall curve """ # Append sentinel values to beginning and end mrec = np.concatenate(([0.0], recall, [1.0])) mpre = np.concatenate(([1.0], precision, [0.0])) # Compute the precision envelope mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) # Integrate area under curve method = 'interp' # methods: 'continuous', 'interp' if method == 'interp': x = np.linspace(0, 1, 101) # 101-point interp (COCO) ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate else: # 'continuous' i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve return ap, mpre, mrec class ConfusionMatrix: # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix def __init__(self, nc, conf=0.25, iou_thres=0.45): self.matrix = np.zeros((nc + 1, nc + 1)) self.nc = nc # number of classes self.conf = conf self.iou_thres = iou_thres def process_batch(self, detections, labels): """ Return intersection-over-union (Jaccard index) of boxes. Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
Arguments: detections (Array[N, 6]), x1, y1, x2, y2, conf, class labels (Array[M, 5]), class, x1, y1, x2, y2 Returns: None, updates confusion matrix accordingly """ detections = detections[detections[:, 4] > self.conf] gt_classes = labels[:, 0].int() detection_classes = detections[:, 5].int() iou = box_iou(labels[:, 1:], detections[:, :4]) x = torch.where(iou > self.iou_thres) if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() if x[0].shape[0] > 1: matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] else: matches = np.zeros((0, 3)) n = matches.shape[0] > 0 m0, m1, _ = matches.transpose().astype(int) for i, gc in enumerate(gt_classes): j = m0 == i if n and sum(j) == 1: self.matrix[detection_classes[m1[j]], gc] += 1 # correct else: self.matrix[self.nc, gc] += 1 # background FP if n: for i, dc in enumerate(detection_classes): if not any(m1 == i): self.matrix[dc, self.nc] += 1 # background FN def matrix(self): return self.matrix def tp_fp(self): tp = self.matrix.diagonal() # true positives fp = self.matrix.sum(1) - tp # false positives # fn = self.matrix.sum(0) - tp # false negatives (missed detections) return tp[:-1], fp[:-1] # remove background class def plot(self, normalize=True, save_dir='', names=()): try: import seaborn as sn array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) fig = plt.figure(figsize=(12, 9), tight_layout=True) nc, nn = self.nc, len(names) # number of classes, names sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered sn.heatmap(array, annot=nc < 30, annot_kws={ "size": 8}, cmap='Blues', fmt='.2f', square=True, vmin=0.0, xticklabels=names + ['background FP'] if labels else "auto", yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) fig.axes[0].set_xlabel('True') fig.axes[0].set_ylabel('Predicted') fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) plt.close() except Exception as e: print(f'WARNING: ConfusionMatrix plot failure: {e}') def print(self): for i in range(self.nc + 1): print(' '.join(map(str, self.matrix[i]))) def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4) # Get the coordinates of bounding boxes if xywh: # transform from xywh to xyxy (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, 1), box2.chunk(4, 1) w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2 b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_ b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_ else: # x1, y1, x2, y2 = box1 b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1) b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1) w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 # Intersection area inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) # Union Area union = w1 * h1 + w2 * h2 - inter + eps # IoU iou = inter / union if CIoU or DIoU or GIoU: cw = torch.max(b1_x2, b2_x2) - 
torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2) with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # CIoU return iou - rho2 / c2 # DIoU c_area = cw * ch + eps # convex area return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf return iou # IoU def box_area(box): # box = xyxy(4,n) return (box[2] - box[0]) * (box[3] - box[1]) def box_iou(box1, box2, eps=1e-7): # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py """ Return intersection-over-union (Jaccard index) of boxes. Both sets of boxes are expected to be in (x1, y1, x2, y2) format. Arguments: box1 (Tensor[N, 4]) box2 (Tensor[M, 4]) Returns: iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 """ # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) (a1, a2), (b1, b2) = box1[:, None].chunk(2, 2), box2.chunk(2, 1) inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) # IoU = inter / (area1 + area2 - inter) return inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter + eps) def bbox_ioa(box1, box2, eps=1e-7): """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 box1: np.array of shape(4) box2: np.array of shape(nx4) returns: np.array of shape(n) """ # Get the coordinates of bounding boxes b1_x1, b1_y1, b1_x2, b1_y2 = box1 b2_x1, b2_y1, b2_x2, b2_y2 = box2.T # Intersection area inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) # box2 area box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps # Intersection over box2 area return inter_area / box2_area def wh_iou(wh1, wh2, eps=1e-7): # Returns the nxm IoU matrix. 
wh1 is nx2, wh2 is mx2 wh1 = wh1[:, None] # [N,1,2] wh2 = wh2[None] # [1,M,2] inter = torch.min(wh1, wh2).prod(2) # [N,M] return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps) # iou = inter / (area1 + area2 - inter) # Plots ---------------------------------------------------------------------------------------------------------------- def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()): # Precision-recall curve fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) py = np.stack(py, axis=1) if 0 < len(names) < 21: # display per-class legend if < 21 classes for i, y in enumerate(py.T): ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) else: ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) ax.set_xlabel('Recall') ax.set_ylabel('Precision') ax.set_xlim(0, 1) ax.set_ylim(0, 1) plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") fig.savefig(save_dir, dpi=250) plt.close() def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'): # Metric-confidence curve fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) if 0 < len(names) < 21: # display per-class legend if < 21 classes for i, y in enumerate(py): ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) else: ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) y = smooth(py.mean(0), 0.05) ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_xlim(0, 1) ax.set_ylim(0, 1) plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") fig.savefig(save_dir, dpi=250) plt.close() ================================================ FILE: module/detect/utils/plots.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Plotting utils """ import math import os from copy import copy from pathlib import Path from urllib.error import URLError import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sn import torch from PIL import Image, ImageDraw, ImageFont from utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords, increment_path, is_ascii, threaded, try_except, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness # Settings RANK = int(os.getenv('RANK', -1)) matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') # for writing to files only class Colors: # Ultralytics color palette https://ultralytics.com/ def __init__(self): # hex = matplotlib.colors.TABLEAU_COLORS.values() hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') self.palette = [self.hex2rgb(f'#{c}') for c in hexs] self.n = len(self.palette) def __call__(self, i, bgr=False): c = self.palette[int(i) % self.n] return (c[2], c[1], c[0]) if bgr else c @staticmethod def hex2rgb(h): # rgb order (PIL) return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) colors = Colors() # create instance for 'from utils.plots import colors' def check_pil_font(font=FONT, size=10): # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary font = Path(font) font = font if font.exists() else (CONFIG_DIR / font.name) 
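# --- Illustrative sketch (not part of the original file) -------------------
# How the module-level Colors palette defined above behaves: hex strings are
# decoded into RGB tuples, indices wrap modulo the palette length, and
# bgr=True swaps the channel order for OpenCV-style drawing. The asserted
# values simply decode the first palette entry, '#FF3838'.
assert colors(0) == (255, 56, 56)              # '#FF3838' as an RGB tuple
assert colors(0, bgr=True) == (56, 56, 255)    # same colour, BGR order
assert colors(colors.n) == colors(0)           # indices wrap around
# ---------------------------------------------------------------------------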
try: return ImageFont.truetype(str(font) if font.exists() else font.name, size) except Exception: # download if missing try: check_font(font) return ImageFont.truetype(str(font), size) except TypeError: check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 except URLError: # not online return ImageFont.load_default() class Annotator: # YOLOv5 Annotator for train/eval mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' non_ascii = not is_ascii(example) # non-latin labels, i.e. asian, arabic, cyrillic self.pil = pil or non_ascii if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) self.font = check_pil_font( font='Arial.Unicode.ttf' if non_ascii else font, size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12) ) else: # use cv2 self.im = im self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): # Add one xyxy box to image with label if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: w, h = self.font.getsize(label) # text width, height outside = box[1] - h >= 0 # label fits outside box self.draw.rectangle( (box[0], box[1] - h if outside else box[1], box[0] + w + 1, box[1] + 1 if outside else box[1] + h + 1), fill=color, ) # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) else: # cv2 p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) if label: tf = max(self.lw - 1, 1) # font thickness w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height outside = p1[1] - h >= 3 p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled cv2.putText( self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA ) def rectangle(self, xy, fill=None, outline=None, width=1): # Add rectangle to image (PIL-only) self.draw.rectangle(xy, fill, outline, width) def text(self, xy, text, txt_color=(255, 255, 255)): # Add text to image (PIL-only) w, h = self.font.getsize(text) # text width, height self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font) def result(self): # Return annotated image as array return np.asarray(self.im) def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): """ x: Features to be visualized module_type: Module type stage: Module stage within model n: Maximum number of feature maps to plot save_dir: Directory to save results """ if 'Detect' not in module_type: batch, channels, height, width = x.shape # batch, channels, height, width if height > 1 and width > 1: f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels n = min(n, channels) # number of plots fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 
rows x n/8 cols ax = ax.ravel() plt.subplots_adjust(wspace=0.05, hspace=0.05) for i in range(n): ax[i].imshow(blocks[i].squeeze()) # cmap='gray' ax[i].axis('off') LOGGER.info(f'Saving {f}... ({n}/{channels})') plt.savefig(f, dpi=300, bbox_inches='tight') plt.close() np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save def hist2d(x, y, n=100): # 2d histogram used in labels.png and evolve.png xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) return np.log(hist[xidx, yidx]) def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): from scipy.signal import butter, filtfilt # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy def butter_lowpass(cutoff, fs, order): nyq = 0.5 * fs normal_cutoff = cutoff / nyq return butter(order, normal_cutoff, btype='low', analog=False) b, a = butter_lowpass(cutoff, fs, order=order) return filtfilt(b, a, data) # forward-backward filter def output_to_target(output): # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] targets = [] for i, o in enumerate(output): for *box, conf, cls in o.cpu().numpy(): targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) return np.array(targets) @threaded def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): # Plot image grid with labels if isinstance(images, torch.Tensor): images = images.cpu().float().numpy() if isinstance(targets, torch.Tensor): targets = targets.cpu().numpy() if np.max(images[0]) <= 1: images *= 255 # de-normalise (optional) bs, _, h, w = images.shape # batch size, _, height, width bs = min(bs, max_subplots) # limit plot images ns = np.ceil(bs ** 0.5) # number of subplots (square) # Build Image mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init for i, im in enumerate(images): if i == max_subplots: # if last batch has fewer images than we expect break x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin im = im.transpose(1, 2, 0) mosaic[y:y + h, x:x + w, :] = im # Resize (optional) scale = max_size / ns / max(h, w) if scale < 1: h = math.ceil(scale * h) w = math.ceil(scale * w) mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) # Annotate fs = int((h + w) * ns * 0.01) # font size annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) for i in range(i + 1): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders if paths: annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames if len(targets) > 0: ti = targets[targets[:, 0] == i] # image targets boxes = xywh2xyxy(ti[:, 2:6]).T classes = ti[:, 1].astype('int') labels = ti.shape[1] == 6 # labels if no conf column conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) if boxes.shape[1]: if boxes.max() <= 1.01: # if normalized with tolerance 0.01 boxes[[0, 2]] *= w # scale to pixels boxes[[1, 3]] *= h elif scale < 1: # absolute coords need scale if image scales boxes *= scale boxes[[0, 2]] += x boxes[[1, 3]] += y for j, box in enumerate(boxes.T.tolist()): cls = classes[j] color = colors(cls) cls = names[cls] if names else cls if labels or conf[j] 
> 0.25: # 0.25 conf thresh label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' annotator.box_label(box, label, color=color) annotator.im.save(fname) # save def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): # Plot LR simulating training for full epochs optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals y = [] for _ in range(epochs): scheduler.step() y.append(optimizer.param_groups[0]['lr']) plt.plot(y, '.-', label='LR') plt.xlabel('epoch') plt.ylabel('LR') plt.grid() plt.xlim(0, epochs) plt.ylim(0) plt.savefig(Path(save_dir) / 'LR.png', dpi=200) plt.close() def plot_val_txt(): # from utils.plots import *; plot_val() # Plot eval.txt histograms x = np.loadtxt('eval.txt', dtype=np.float32) box = xyxy2xywh(x[:, :4]) cx, cy = box[:, 0], box[:, 1] fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) ax.set_aspect('equal') plt.savefig('hist2d.png', dpi=300) fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) ax[0].hist(cx, bins=600) ax[1].hist(cy, bins=600) plt.savefig('hist1d.png', dpi=200) def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() # Plot targets.txt histograms x = np.loadtxt('targets.txt', dtype=np.float32).T s = ['x targets', 'y targets', 'width targets', 'height targets'] fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) ax = ax.ravel() for i in range(4): ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') ax[i].legend() ax[i].set_title(s[i]) plt.savefig('targets.jpg', dpi=200) def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() # Plot file=study.txt generated by eval.py (or plot all study*.txt in dir) save_dir = Path(file).parent if file else Path(dir) plot2 = False # plot additional results if plot2: ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: for f in sorted(save_dir.glob('study*.txt')): y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T x = np.arange(y.shape[1]) if x is None else np.array(x) if plot2: s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] for i in range(7): ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) ax[i].set_title(s[i]) j = y[3].argmax() + 1 ax2.plot( y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO') ) ax2.plot( 1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet' ) ax2.grid(alpha=0.2) ax2.set_yticks(np.arange(20, 60, 5)) ax2.set_xlim(0, 57) ax2.set_ylim(25, 55) ax2.set_xlabel('GPU Speed (ms/img)') ax2.set_ylabel('COCO AP eval') ax2.legend(loc='lower right') f = save_dir / 'study.png' print(f'Saving {f}...') plt.savefig(f, dpi=300) @try_except # known issue https://github.com/ultralytics/yolov5/issues/5395 @Timeout(30) # known issue https://github.com/ultralytics/yolov5/issues/5611 def plot_labels(labels, names=(), save_dir=Path('')): # plot dataset labels LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... 
") c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes nc = int(c.max() + 1) # number of classes x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) # seaborn correlogram sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) plt.close() # matplotlib labels matplotlib.use('svg') # faster ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) try: # color histogram bars by class [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 except Exception: pass ax[0].set_ylabel('instances') if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) ax[0].set_xticklabels(names, rotation=90, fontsize=10) else: ax[0].set_xlabel('classes') sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) # rectangles labels[:, 1:3] = 0.5 # center labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) for cls, *box in labels[:1000]: ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot ax[1].imshow(img) ax[1].axis('off') for a in [0, 1, 2, 3]: for s in ['top', 'right', 'left', 'bottom']: ax[a].spines[s].set_visible(False) plt.savefig(save_dir / 'labels.jpg', dpi=200) matplotlib.use('Agg') plt.close() def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() # Plot evolve.csv hyp evolution results evolve_csv = Path(evolve_csv) data = pd.read_csv(evolve_csv) keys = [x.strip() for x in data.columns] x = data.values f = fitness(x) j = np.argmax(f) # max fitness index plt.figure(figsize=(10, 12), tight_layout=True) matplotlib.rc('font', **{'size': 8}) print(f'Best results from row {j} of {evolve_csv}:') for i, k in enumerate(keys[7:]): v = x[:, 7 + i] mu = v[j] # best single result plt.subplot(6, 5, i + 1) plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') plt.plot(mu, f.max(), 'k+', markersize=15) plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters if i % 5 != 0: plt.yticks([]) print(f'{k:>15}: {mu:.3g}') f = evolve_csv.with_suffix('.png') # filename plt.savefig(f, dpi=200) plt.close() print(f'Saved {f}') def plot_results(file='path/to/results.csv', dir=''): # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) ax = ax.ravel() files = list(save_dir.glob('results*.csv')) assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 
for f in files: try: data = pd.read_csv(f) s = [x.strip() for x in data.columns] x = data.values[:, 0] for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): y = data.values[:, j].astype('float') # y[y == 0] = np.nan # don't show zero values ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) ax[i].set_title(s[j], fontsize=12) # if j in [8, 9, 10]: # share train and eval loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: LOGGER.info(f'Warning: Plotting error for {f}: {e}') ax[1].legend() fig.savefig(save_dir / 'results.png', dpi=200) plt.close() def profile_idetection(start=0, stop=0, labels=(), save_dir=''): # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] files = list(Path(save_dir).glob('frames*.txt')) for fi, f in enumerate(files): try: results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows n = results.shape[1] # number of rows x = np.arange(start, min(stop, n) if stop else n) results = results[:, x] t = (results[0] - results[0].min()) # set t0=0s results[0] = x for i, a in enumerate(ax): if i < len(results): label = labels[fi] if len(labels) else f.stem.replace('frames_', '') a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) a.set_title(s[i]) a.set_xlabel('time (s)') # if fi == len(files) - 1: # a.set_ylim(bottom=0) for side in ['top', 'right']: a.spines[side].set_visible(False) else: a.remove() except Exception as e: print(f'Warning: Plotting error for {f}; {e}') ax[1].legend() plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
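# --- Illustrative sketch (not part of the original file) -------------------
# A toy walk-through of the gain/pad expansion performed by save_one_box()
# below, written without the xyxy2xywh/xywh2xyxy helpers: a 100x100 box with
# the default gain=1.02 and pad=10 grows to a 112x112 box around the same
# centre. The numbers are assumptions chosen for the example.
import torch

xyxy = torch.tensor([[50., 50., 150., 150.]])                  # x1, y1, x2, y2
b = torch.cat(((xyxy[:, :2] + xyxy[:, 2:]) / 2,                 # centre cx, cy
               xyxy[:, 2:] - xyxy[:, :2]), 1)                   # size w, h
b[:, 2:] = b[:, 2:] * 1.02 + 10                                 # wh * gain + pad
expanded = torch.cat((b[:, :2] - b[:, 2:] / 2,
                      b[:, :2] + b[:, 2:] / 2), 1)
# expanded == [[44., 44., 156., 156.]], i.e. a 112x112 crop region
# ---------------------------------------------------------------------------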
Save and/or return crop xyxy = torch.tensor(xyxy).view(-1, 4) b = xyxy2xywh(xyxy) # boxes if square: b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad xyxy = xywh2xyxy(b).long() clip_coords(xyxy, im.shape) crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] if save: file.parent.mkdir(parents=True, exist_ok=True) # make directory f = str(increment_path(file).with_suffix('.jpg')) # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB return crop ================================================ FILE: module/detect/utils/torch_utils.py ================================================ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ PyTorch utils """ import math import os import platform import subprocess import time import warnings from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.nn.parallel import DistributedDataParallel as DDP from utils.general import LOGGER, check_version, colorstr, file_date, git_describe LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) try: import thop # for FLOPs computation except ImportError: thop = None # Suppress PyTorch warnings warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling') def smart_DDP(model): # Model DDP creation with checks assert not check_version(torch.__version__, '1.12.0', pinned=True), \ 'torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. ' \ 'Please upgrade or downgrade torch to use DDP. See https://github.com/ultralytics/yolov5/issues/8395' if check_version(torch.__version__, '1.11.0'): return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True) else: return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) @contextmanager def torch_distributed_zero_first(local_rank: int): # Decorator to make all processes in distributed training wait for each local_master to do something if local_rank not in [-1, 0]: dist.barrier(device_ids=[local_rank]) yield if local_rank == 0: dist.barrier(device_ids=[0]) def device_count(): # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). 
Supports Linux and Windows assert platform.system() in ('Linux', 'Windows'), 'device_count() only supported on Linux or Windows' try: cmd = 'nvidia-smi -L | wc -l' if platform.system() == 'Linux' else 'nvidia-smi -L | find /c /v ""' # Windows return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]) except Exception: return 0 def select_device(device='', batch_size=0, newline=True): # device = None or 'cpu' or 0 or '0' or '0,1,2,3' s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} ' device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0' cpu = device == 'cpu' mps = device == 'mps' # Apple Metal Performance Shaders (MPS) if cpu or mps: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" if not (cpu or mps) and torch.cuda.is_available(): # prefer GPU if available devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7 n = len(devices) # device count if n > 1 and batch_size > 0: # check batch_size is divisible by device_count assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' space = ' ' * (len(s) + 1) for i, d in enumerate(devices): p = torch.cuda.get_device_properties(i) s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB arg = 'cuda:0' elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available s += 'MPS\n' arg = 'mps' else: # revert to CPU s += 'CPU\n' arg = 'cpu' if not newline: s = s.rstrip() LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe return torch.device(arg) def time_sync(): # PyTorch-accurate time if torch.cuda.is_available(): torch.cuda.synchronize() return time.time() def profile(input, ops, n=10, device=None): # YOLOv5 speed/memory/FLOPs profiler # # Usage: # input = torch.randn(16, 3, 640, 640) # m1 = lambda x: x * torch.sigmoid(x) # m2 = nn.SiLU() # profile(input, [m1, m2], n=100) # profile over 100 iterations results = [] if not isinstance(device, torch.device): device = select_device(device) print( f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" f"{'input':>24s}{'output':>24s}" ) for x in input if isinstance(input, list) else [input]: x = x.to(device) x.requires_grad = True for m in ops if isinstance(ops, list) else [ops]: m = m.to(device) if hasattr(m, 'to') else m # device m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward try: flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs except Exception: flops = 0 try: for _ in range(n): t[0] = time_sync() y = m(x) t[1] = time_sync() try: _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward() t[2] = time_sync() except Exception: # no backward method # print(e) # for debug t[2] = float('nan') tf += (t[1] - t[0]) * 1000 / n # ms per op forward tb += (t[2] - t[1]) * 1000 / n # ms per 
op backward mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y)) # shapes p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') results.append([p, flops, mem, tf, tb, s_in, s_out]) except Exception as e: print(e) results.append(None) torch.cuda.empty_cache() return results def is_parallel(model): # Returns True if model is of type DP or DDP return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) def de_parallel(model): # De-parallelize a model: returns single-GPU model if model is of type DP or DDP return model.module if is_parallel(model) else model def initialize_weights(model): for m in model.modules(): t = type(m) if t is nn.Conv2d: pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif t is nn.BatchNorm2d: m.eps = 1e-3 m.momentum = 0.03 elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: m.inplace = True def find_modules(model, mclass=nn.Conv2d): # Finds layer indices matching module class 'mclass' return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] def sparsity(model): # Return global model sparsity a, b = 0, 0 for p in model.parameters(): a += p.numel() b += (p == 0).sum() return b / a def prune(model, amount=0.3): # Prune model to requested global sparsity import torch.nn.utils.prune as prune print('Pruning model... ', end='') for name, m in model.named_modules(): if isinstance(m, nn.Conv2d): prune.l1_unstructured(m, name='weight', amount=amount) # prune prune.remove(m, 'weight') # make permanent print(' %.3g global sparsity' % sparsity(model)) def fuse_conv_and_bn(conv, bn): # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ fusedconv = nn.Conv2d( conv.in_channels, conv.out_channels, kernel_size=conv.kernel_size, stride=conv.stride, padding=conv.padding, groups=conv.groups, bias=True ).requires_grad_(False).to(conv.weight.device) # Prepare filters w_conv = conv.weight.clone().view(conv.out_channels, -1) w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) # Prepare spatial bias b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) return fusedconv def model_info(model, verbose=False, img_size=640): # Model information. img_size may be int or list, i.e. 
img_size=640 or img_size=[640, 320] n_p = sum(x.numel() for x in model.parameters()) # number parameters n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients if verbose: print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") for i, (name, p) in enumerate(model.named_parameters()): name = name.replace('module_list.', '') print( '%5g %40s %9s %12g %20s %10.3g %10.3g' % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()) ) try: # FLOPs from thop import profile stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs except Exception: fs = '' name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) # Scales img(bs,3,y,x) by ratio constrained to gs-multiple if ratio == 1.0: return img h, w = img.shape[2:] s = (int(h * ratio), int(w * ratio)) # new size img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize if not same_shape: # pad/crop img h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean def copy_attr(a, b, include=(), exclude=()): # Copy attributes from b to a, options to only include [...] and to exclude [...] for k, v in b.__dict__.items(): if (len(include) and k not in include) or k.startswith('_') or k in exclude: continue else: setattr(a, k, v) def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, weight_decay=1e-5): # YOLOv5 3-param group optimizer: 0) weights with decay, 1) weights no decay, 2) biases no decay g = [], [], [] # optimizer parameter groups bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. 
BatchNorm2d() for v in model.modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias (no decay) g[2].append(v.bias) if isinstance(v, bn): # weight (no decay) g[1].append(v.weight) elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) g[0].append(v.weight) if name == 'Adam': optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999)) # adjust beta1 to momentum elif name == 'AdamW': optimizer = torch.optim.AdamW(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0) elif name == 'RMSProp': optimizer = torch.optim.RMSprop(g[2], lr=lr, momentum=momentum) elif name == 'SGD': optimizer = torch.optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True) else: raise NotImplementedError(f'Optimizer {name} not implemented.') optimizer.add_param_group({'params': g[0], 'weight_decay': weight_decay}) # add g0 with weight_decay optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) LOGGER.info( f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups " f"{len(g[1])} weight (no decay), {len(g[0])} weight, {len(g[2])} bias" ) return optimizer class EarlyStopping: # YOLOv5 simple early stopper def __init__(self, patience=30): self.best_fitness = 0.0 # i.e. mAP self.best_epoch = 0 self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop self.possible_stop = False # possible stop may occur next epoch def __call__(self, epoch, fitness): if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training self.best_epoch = epoch self.best_fitness = fitness delta = epoch - self.best_epoch # epochs without improvement self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch stop = delta >= self.patience # stop training if patience exceeded if stop: LOGGER.info( f'Stopping training early as no improvement observed in last {self.patience} epochs. ' f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n' f'To update EarlyStopping(patience={self.patience}) pass a new patience value, ' f'i.e. `python trainfd.py --patience 300` or use `--patience 0` to disable EarlyStopping.' 
) return stop class ModelEMA: """ Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models Keeps a moving average of everything in the model state_dict (parameters and buffers) For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage """ def __init__(self, model, decay=0.9999, tau=2000, updates=0): # Create EMA self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA # if next(model.parameters()).device.type != 'cpu': # self.ema.half() # FP16 EMA self.updates = updates # number of EMA updates self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs) for p in self.ema.parameters(): p.requires_grad_(False) def update(self, model): # Update EMA parameters with torch.no_grad(): self.updates += 1 d = self.decay(self.updates) msd = de_parallel(model).state_dict() # model state_dict for k, v in self.ema.state_dict().items(): if v.dtype.is_floating_point: v *= d v += (1 - d) * msd[k].detach() def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): # Update EMA attributes copy_attr(self.ema, model, include, exclude) ================================================ FILE: module/fuse/__init__.py ================================================ ================================================ FILE: module/fuse/discriminator.py ================================================ from torch import nn, Tensor class Discriminator(nn.Module): """ Use to discriminate fused images and source images. """ def __init__(self, dim: int = 32, size: tuple[int, int] = (224, 224)): super(Discriminator, self).__init__() self.conv = nn.Sequential( nn.Sequential( nn.Conv2d(1, dim, (3, 3), (2, 2), 1), nn.LeakyReLU(0.2, True), ), nn.Sequential( nn.Conv2d(dim, dim * 2, (3, 3), (2, 2), 1), nn.LeakyReLU(0.2, True), ), nn.Sequential( nn.Conv2d(dim * 2, dim * 4, (3, 3), (2, 2), 1), nn.LeakyReLU(0.2, True), ), ) self.flatten = nn.Flatten() self.linear = nn.Linear((size[0] // 8) * (size[1] // 8) * 4 * dim, 1) def forward(self, x: Tensor) -> Tensor: x = self.conv(x) x = self.flatten(x) x = self.linear(x) return x ================================================ FILE: module/fuse/generator.py ================================================ import torch import torch.nn as nn from torch import Tensor class Generator(nn.Module): r""" Use to generate fused images. 
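Example (an illustrative sketch, not from the repository: random
single-channel inputs and the default ``dim``/``depth``)::

    import torch
    g = Generator(dim=32, depth=3)
    ir = torch.rand(1, 1, 224, 224)   # infrared, single channel
    vi = torch.rand(1, 1, 224, 224)   # visible, single channel
    with torch.no_grad():
        fus = g(ir, vi)               # (1, 1, 224, 224), in (-1, 1) after Tanh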
ir + vi -> fus """ def __init__(self, dim: int = 32, depth: int = 3): super(Generator, self).__init__() self.depth = depth self.encoder = nn.Sequential( nn.Conv2d(2, dim, (3, 3), (1, 1), 1), nn.BatchNorm2d(dim), nn.ReLU() ) self.dense = nn.ModuleList([ nn.Sequential( nn.Conv2d(dim * (i + 1), dim, (3, 3), (1, 1), 1), nn.BatchNorm2d(dim), nn.ReLU() ) for i in range(depth) ]) self.fuse = nn.Sequential( nn.Sequential( nn.Conv2d(dim * (depth + 1), dim * 4, (3, 3), (1, 1), 1), nn.ReLU() ), nn.Sequential( nn.Conv2d(dim * 4, dim * 2, (3, 3), (1, 1), 1), nn.BatchNorm2d(dim * 2), nn.ReLU() ), nn.Sequential( nn.Conv2d(dim * 2, dim, (3, 3), (1, 1), 1), nn.BatchNorm2d(dim), nn.ReLU() ), nn.Sequential( nn.Conv2d(dim, 1, (3, 3), (1, 1), 1), nn.Tanh() ), ) def forward(self, ir: Tensor, vi: Tensor) -> Tensor: src = torch.cat([ir, vi], dim=1) x = self.encoder(src) for i in range(self.depth): t = self.dense[i](x) x = torch.cat([x, t], dim=1) fus = self.fuse(x) return fus ================================================ FILE: module/saliency/__init__.py ================================================ ================================================ FILE: module/saliency/u2net.py ================================================ import torch import torch.nn as nn import torch.nn.functional as F # U^2-Net: Going Deeper with Nested U-Structure for Salient Object Detection. # Author: Xuebin Qin, Zichen Zhang, Chenyang Huang et al. # Code Reference: https://github.com/xuebinqin/U-2-Net/blob/master/model/u2net.py class REBNCONV(nn.Module): def __init__(self, in_ch=3, out_ch=3, dirate=1): super(REBNCONV, self).__init__() self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dirate, dilation=1 * dirate) self.bn_s1 = nn.BatchNorm2d(out_ch) self.relu_s1 = nn.ReLU(inplace=True) def forward(self, x): hx = x xout = self.relu_s1(self.bn_s1(self.conv_s1(hx))) return xout ## upsample tensor 'src' to have the same spatial size with tensor 'tar' def _upsample_like(src, tar): src = nn.functional.interpolate(src, size=tar.shape[2:], mode='bilinear') return src ### RSU-7 ### class RSU7(nn.Module): # UNet07DRES(nn.Module): def __init__(self, in_ch=3, mid_ch=12, out_ch=3): super(RSU7, self).__init__() self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1) self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1) self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2) self.rebnconv6d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) def forward(self, x): hx = x hxin = self.rebnconvin(hx) hx1 = self.rebnconv1(hxin) hx = self.pool1(hx1) hx2 = self.rebnconv2(hx) hx = self.pool2(hx2) hx3 = self.rebnconv3(hx) hx = self.pool3(hx3) hx4 = self.rebnconv4(hx) hx = self.pool4(hx4) hx5 = self.rebnconv5(hx) hx = self.pool5(hx5) hx6 = self.rebnconv6(hx) hx7 = 
self.rebnconv7(hx6) hx6d = self.rebnconv6d(torch.cat((hx7, hx6), 1)) hx6dup = _upsample_like(hx6d, hx5) hx5d = self.rebnconv5d(torch.cat((hx6dup, hx5), 1)) hx5dup = _upsample_like(hx5d, hx4) hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1)) hx4dup = _upsample_like(hx4d, hx3) hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) hx3dup = _upsample_like(hx3d, hx2) hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) hx2dup = _upsample_like(hx2d, hx1) hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) return hx1d + hxin ### RSU-6 ### class RSU6(nn.Module): # UNet06DRES(nn.Module): def __init__(self, in_ch=3, mid_ch=12, out_ch=3): super(RSU6, self).__init__() self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1) self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=2) self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) def forward(self, x): hx = x hxin = self.rebnconvin(hx) hx1 = self.rebnconv1(hxin) hx = self.pool1(hx1) hx2 = self.rebnconv2(hx) hx = self.pool2(hx2) hx3 = self.rebnconv3(hx) hx = self.pool3(hx3) hx4 = self.rebnconv4(hx) hx = self.pool4(hx4) hx5 = self.rebnconv5(hx) hx6 = self.rebnconv6(hx5) hx5d = self.rebnconv5d(torch.cat((hx6, hx5), 1)) hx5dup = _upsample_like(hx5d, hx4) hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1)) hx4dup = _upsample_like(hx4d, hx3) hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) hx3dup = _upsample_like(hx3d, hx2) hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) hx2dup = _upsample_like(hx2d, hx1) hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) return hx1d + hxin ### RSU-5 ### class RSU5(nn.Module): # UNet05DRES(nn.Module): def __init__(self, in_ch=3, mid_ch=12, out_ch=3): super(RSU5, self).__init__() self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2) self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) def forward(self, x): hx = x hxin = self.rebnconvin(hx) hx1 = self.rebnconv1(hxin) hx = self.pool1(hx1) hx2 = self.rebnconv2(hx) hx = self.pool2(hx2) hx3 = self.rebnconv3(hx) hx = self.pool3(hx3) hx4 = self.rebnconv4(hx) hx5 = self.rebnconv5(hx4) hx4d = self.rebnconv4d(torch.cat((hx5, hx4), 1)) hx4dup = _upsample_like(hx4d, hx3) hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) hx3dup = _upsample_like(hx3d, hx2) hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 
1)) hx2dup = _upsample_like(hx2d, hx1) hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) return hx1d + hxin ### RSU-4 ### class RSU4(nn.Module): # UNet04DRES(nn.Module): def __init__(self, in_ch=3, mid_ch=12, out_ch=3): super(RSU4, self).__init__() self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=2) self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) def forward(self, x): hx = x hxin = self.rebnconvin(hx) hx1 = self.rebnconv1(hxin) hx = self.pool1(hx1) hx2 = self.rebnconv2(hx) hx = self.pool2(hx2) hx3 = self.rebnconv3(hx) hx4 = self.rebnconv4(hx3) hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1)) hx3dup = _upsample_like(hx3d, hx2) hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) hx2dup = _upsample_like(hx2d, hx1) hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) return hx1d + hxin ### RSU-4F ### class RSU4F(nn.Module): # UNet04FRES(nn.Module): def __init__(self, in_ch=3, mid_ch=12, out_ch=3): super(RSU4F, self).__init__() self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=2) self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=4) self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=8) self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=4) self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=2) self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) def forward(self, x): hx = x hxin = self.rebnconvin(hx) hx1 = self.rebnconv1(hxin) hx2 = self.rebnconv2(hx1) hx3 = self.rebnconv3(hx2) hx4 = self.rebnconv4(hx3) hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1)) hx2d = self.rebnconv2d(torch.cat((hx3d, hx2), 1)) hx1d = self.rebnconv1d(torch.cat((hx2d, hx1), 1)) return hx1d + hxin ##### U^2-Net #### class U2NET(nn.Module): def __init__(self, in_ch=3, out_ch=1): super(U2NET, self).__init__() self.stage1 = RSU7(in_ch, 32, 64) self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.stage2 = RSU6(64, 32, 128) self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.stage3 = RSU5(128, 64, 256) self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.stage4 = RSU4(256, 128, 512) self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.stage5 = RSU4F(512, 256, 512) self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.stage6 = RSU4F(512, 256, 512) # decoder self.stage5d = RSU4F(1024, 256, 512) self.stage4d = RSU4(1024, 128, 256) self.stage3d = RSU5(512, 64, 128) self.stage2d = RSU6(256, 32, 64) self.stage1d = RSU7(128, 16, 64) self.side1 = nn.Conv2d(64, out_ch, 3, padding=1) self.side2 = nn.Conv2d(64, out_ch, 3, padding=1) self.side3 = nn.Conv2d(128, out_ch, 3, padding=1) self.side4 = nn.Conv2d(256, out_ch, 3, padding=1) self.side5 = nn.Conv2d(512, out_ch, 3, padding=1) self.side6 = nn.Conv2d(512, out_ch, 3, padding=1) self.outconv = nn.Conv2d(6 * out_ch, out_ch, 1) def forward(self, x): hx = x # stage 1 hx1 = self.stage1(hx) hx = self.pool12(hx1) # stage 2 hx2 = self.stage2(hx) hx = self.pool23(hx2) # stage 3 hx3 = self.stage3(hx) hx = self.pool34(hx3) # stage 4 hx4 = self.stage4(hx) hx = self.pool45(hx4) # stage 5 hx5 = self.stage5(hx) hx = 
self.pool56(hx5) # stage 6 hx6 = self.stage6(hx) hx6up = _upsample_like(hx6, hx5) # -------------------- decoder -------------------- hx5d = self.stage5d(torch.cat((hx6up, hx5), 1)) hx5dup = _upsample_like(hx5d, hx4) hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1)) hx4dup = _upsample_like(hx4d, hx3) hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1)) hx3dup = _upsample_like(hx3d, hx2) hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1)) hx2dup = _upsample_like(hx2d, hx1) hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1)) # side output d1 = self.side1(hx1d) d2 = self.side2(hx2d) d2 = _upsample_like(d2, d1) d3 = self.side3(hx3d) d3 = _upsample_like(d3, d1) d4 = self.side4(hx4d) d4 = _upsample_like(d4, d1) d5 = self.side5(hx5d) d5 = _upsample_like(d5, d1) d6 = self.side6(hx6) d6 = _upsample_like(d6, d1) d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1)) return [torch.sigmoid(x) for x in [d0, d1, d2, d3, d4, d5, d6]] ### U^2-Net small ### class U2NETP(nn.Module): def __init__(self, in_ch=3, out_ch=1): super(U2NETP, self).__init__() self.stage1 = RSU7(in_ch, 16, 64) self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.stage2 = RSU6(64, 16, 64) self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.stage3 = RSU5(64, 16, 64) self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.stage4 = RSU4(64, 16, 64) self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.stage5 = RSU4F(64, 16, 64) self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True) self.stage6 = RSU4F(64, 16, 64) # decoder self.stage5d = RSU4F(128, 16, 64) self.stage4d = RSU4(128, 16, 64) self.stage3d = RSU5(128, 16, 64) self.stage2d = RSU6(128, 16, 64) self.stage1d = RSU7(128, 16, 64) self.side1 = nn.Conv2d(64, out_ch, 3, padding=1) self.side2 = nn.Conv2d(64, out_ch, 3, padding=1) self.side3 = nn.Conv2d(64, out_ch, 3, padding=1) self.side4 = nn.Conv2d(64, out_ch, 3, padding=1) self.side5 = nn.Conv2d(64, out_ch, 3, padding=1) self.side6 = nn.Conv2d(64, out_ch, 3, padding=1) self.outconv = nn.Conv2d(6 * out_ch, out_ch, 1) def forward(self, x): hx = x # stage 1 hx1 = self.stage1(hx) hx = self.pool12(hx1) # stage 2 hx2 = self.stage2(hx) hx = self.pool23(hx2) # stage 3 hx3 = self.stage3(hx) hx = self.pool34(hx3) # stage 4 hx4 = self.stage4(hx) hx = self.pool45(hx4) # stage 5 hx5 = self.stage5(hx) hx = self.pool56(hx5) # stage 6 hx6 = self.stage6(hx) hx6up = _upsample_like(hx6, hx5) # decoder hx5d = self.stage5d(torch.cat((hx6up, hx5), 1)) hx5dup = _upsample_like(hx5d, hx4) hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1)) hx4dup = _upsample_like(hx4d, hx3) hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1)) hx3dup = _upsample_like(hx3d, hx2) hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1)) hx2dup = _upsample_like(hx2d, hx1) hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1)) # side output d1 = self.side1(hx1d) d2 = self.side2(hx2d) d2 = _upsample_like(d2, d1) d3 = self.side3(hx3d) d3 = _upsample_like(d3, d1) d4 = self.side4(hx4d) d4 = _upsample_like(d4, d1) d5 = self.side5(hx5d) d5 = _upsample_like(d5, d1) d6 = self.side6(hx6) d6 = _upsample_like(d6, d1) d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1)) return [torch.sigmoid(x) for x in [d0, d1, d2, d3, d4, d5, d6]] ================================================ FILE: pipeline/__init__.py ================================================ ================================================ FILE: pipeline/detect.py ================================================ import logging import sys from pathlib import Path from typing import Literal, List, Tuple import numpy import 
torch import torch.backends.cudnn from numpy import number from torch import Tensor, nn from torchvision.ops import box_convert from torchvision.utils import draw_bounding_boxes from functions.get_param_groups import get_param_groups from module.detect.models.common import Conv from module.detect.models.yolo import Model from module.detect.utils.general import labels_to_class_weights, non_max_suppression from module.detect.utils.loss import ComputeLoss from module.detect.utils.metrics import box_iou class Detect: r""" Init detect pipeline to detect objects from fused images. """ def __init__(self, config, mode: Literal['train', 'inference'], nc: int, classes: List[str], labels: List[Tensor]): # attach hyper parameters self.config = config self.mode = mode # fuse computation mode: train(grad+graph), eval(graph), inference(x) # init device device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') logging.info(f'deploy {config.detect.model} on device {str(device)}') self.device = device # init yolo model model_t = config.detect.model config_p = Path(__file__).parents[1] / 'module' / 'detect' / 'models' / f'{model_t}.yaml' net = Model(cfg=config_p, ch=config.detect.channels, nc=nc).to(self.device) logging.info(f'init {model_t} with (nc: {nc})') self.net = net # init hyperparameters hyp = config.loss.detect nl = net.model[-1].nl # number of detection layers # model parameters hyp['box'] *= 3 / nl # scale to layers hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers hyp['obj'] *= (config.train.image_size[0] / 640) ** 2 * 3 / nl # scale to image size and layers hyp['label_smoothing'] = False # label smoothing # attach constants net.nc = nc # attach number of classes to model net.hyp = hyp # attach hyper parameters to model net.class_weights = labels_to_class_weights(labels, nc).to(self.device) # attach class weights net.names = classes # load pretrained parameters (optional) d_ckpt = config.detect.pretrained if d_ckpt is not None: if 'http' in d_ckpt: ckpt_p = Path.cwd() / 'weights' / 'v1' / 'tardal.pth' url = d_ckpt logging.info(f'download pretrained parameters from {url}') try: ckpt = torch.hub.load_state_dict_from_url(url, model_dir=ckpt_p.parent, map_location='cpu') except Exception as err: logging.fatal(f'connect to {url} failed: {err}, try download pretrained weights manually') sys.exit(1) else: ckpt = torch.load(d_ckpt, map_location='cpu') self.load_ckpt(ckpt) # criterion (reference: YOLOv5 official) self.loss = ComputeLoss(net) def load_ckpt(self, ckpt: dict): ckpt = ckpt if 'detect' not in ckpt else ckpt['detect'] self.net.load_state_dict(ckpt) def load_ckpt_fuse(self, ckpt: dict): ckpt = ckpt if 'detect' not in ckpt else ckpt['detect'] # fuse conv & bn self.net.fuse() # compatibility updates for m in self.net.modules(): t = type(m) if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): m.inplace = True # torch 1.7.0 compatibility if t is Detect and not isinstance(m.anchor_grid, list): delattr(m, 'anchor_grid') setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) elif t is Conv: m._non_persistent_buffers_set = set() # torch 1.6.0 compatibility elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): m.recompute_scale_factor = None # torch 1.11.0 compatibility # return as expect self.net.load_state_dict(ckpt) def save_ckpt(self) -> dict: ckpt = {'detect': self.net.state_dict()} return ckpt def forward(self, imgs: Tensor) -> Tensor: self.net.train() pred = self.net(imgs) return pred @torch.no_grad() def eval(self, imgs: Tensor, 
targets: Tensor, stats: List, preview: bool = False) -> Tuple[int, Tensor | None]: self.net.eval() # forward preds, _ = self.net(imgs) # (xyxy, conf, cls) [h, w] # convert pred format batch_size, _, height, width = imgs.shape targets[:, 2:] *= torch.tensor((width, height, width, height), device=self.device) # (id, cls, xyxy) [1, 1] -> [h, w] preds = non_max_suppression(preds, conf_thres=0.001, iou_thres=0.6, labels=[], multi_label=True) # (xyxy, conf, cls) [h, w] # const iou_v = torch.linspace(0.5, 0.95, 10).to(self.device) # iou vector for mAP@0.5:0.95 n_iou = iou_v.numel() # record seen = 0 # statistics per images for si, pred in enumerate(preds): labels = targets[targets[:, 0] == si, 1:] # (cls, xyxy) [h, w] num_l, num_p = labels.shape[0], pred.shape[0] correct = torch.zeros(num_p, n_iou, dtype=torch.bool, device=self.device) seen += 1 # no pred result if num_p == 0: if num_l: stats.append((correct, *torch.zeros((2, 0), device=self.device), labels[:, 0])) continue # predictions pred_n = pred.clone() # evaluate if num_l: t_box = labels[:, 1:5] # (xyxy) [h, w] labels_n = torch.cat((labels[:, 0:1], t_box), 1) # (xyxy, cls) [h, w] correct = self.process_batch(pred_n, labels_n, iou_v) # update stats matrix stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0])) # (correct, conf, pcls, tcls) # preview if preview: prv = self.preview(imgs, preds) return seen, prv # return as expected return seen, None @torch.inference_mode() def inference(self, imgs: Tensor) -> Tensor: self.net.eval() # forward preds, _ = self.net(imgs) # convert pred format batch_size, _, height, width = imgs.shape preds = non_max_suppression(preds, conf_thres=0.001, iou_thres=0.6, multi_label=True) # [xyxy, conf, cls] # return as expected return preds def criterion(self, imgs: Tensor, targets: Tensor) -> Tuple[Tensor, List[number]]: """ criterion on detector """ logging.debug('criterion on yolo') # forward pred = self.forward(imgs) # (bs, 3, 80, 80, class + 5) # calculate loss targets[:, 2:] = box_convert(targets[:, 2:], 'xyxy', 'cxcywh') # (idx, cls, x1, y1, x2, y2) -> (idx, cls, cx, cy, w, h) loss, loss_items = self.loss(pred, targets.to(self.device)) return loss, [x.item() for x in loss_items] @staticmethod def preview(imgs: Tensor, preds: Tensor, conf_th: float = 0.6): imgs_mk = [] # preds: (xyxy, conf, cls) # mark on images for img, pred in zip(imgs, preds): pred = list(filter(lambda x: x[4] > conf_th, pred)) logging.debug(f'detect {len(pred)} on images') img = (img * 255).type(torch.uint8) boxes = [x[:4] for x in pred] cls = [int(x[5].cpu().numpy()) for x in pred] labels = [f'{[cls]}: {x[4].cpu().numpy():.2f}' for cls, x in zip(cls, pred)] if len(boxes): img = draw_bounding_boxes(img, torch.stack(boxes, dim=0), labels=labels, width=2) imgs_mk.append((img / 255).float().to(imgs.device)) # fill or crop to 9 images if len(imgs_mk) > 9: imgs_mk = imgs_mk[:9] elif len(imgs_mk) < 9: zeros = [torch.zeros_like(imgs_mk[0], device=imgs[0].device) for _ in range(9 - len(imgs_mk))] imgs_mk = imgs_mk + zeros # merge images(9, 3, h, w) to one image (3, 3h, 3w) imgs_mk = torch.stack(imgs_mk, dim=0) imgs_c = [] for i in range(3): t = [imgs_mk[i * 3 + j] for j in range(3)] # [(3, h, w), (3, h, w), (3, h, w)] imgs_c.append(torch.cat(t, dim=2)) # (3, h, 3w) imgs_one = torch.cat(imgs_c, dim=1) # (3, 3h, 3w) # return as expected return imgs_one def param_groups(self) -> tuple[List, List, List]: group = [], [], [] tmp = get_param_groups(self.net) for idx in range(3): group[idx].extend(tmp[idx]) return group @staticmethod def 
process_batch(detections, labels, iou_v): """ Return correct predictions' matrix. Both sets of boxes are in (x1, y1, x2, y2) format. Arguments: detections (Array[N, 6]), x1, y1, x2, y2, conf, class labels (Array[M, 5]), class, x1, y1, x2, y2 iou_v (Array[10]), iou thresholds Returns: correct (Array[N, 10]), for 10 IoU levels """ correct = torch.zeros(detections.shape[0], iou_v.shape[0], dtype=torch.bool, device=iou_v.device) iou = box_iou(labels[:, 1:], detections[:, :4]) x = torch.where((iou >= iou_v[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou] if x[0].shape[0] > 1: matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[numpy.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[numpy.unique(matches[:, 0], return_index=True)[1]] matches = torch.Tensor(matches).to(iou_v.device) correct[matches[:, 1].long()] = matches[:, 2:3] >= iou_v return correct ================================================ FILE: pipeline/fuse.py ================================================ import logging import sys from pathlib import Path from typing import Literal, List, Tuple, Optional import torch import torch.backends.cudnn from kornia.filters import spatial_gradient from kornia.losses import MS_SSIMLoss, ssim_loss from numpy import number from torch import Tensor from torch.nn.functional import l1_loss from functions.div_loss import div_loss from functions.get_param_groups import get_param_groups from module.fuse.discriminator import Discriminator from module.fuse.generator import Generator class Fuse: r""" Init fuse pipeline to generate fused images from infrared and visible images. 
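Example (an illustrative usage sketch; ``config`` is assumed to be an
attribute-style configuration object exposing the fields read in
``__init__`` below, e.g. ``config.fuse.dim``, ``config.fuse.depth`` and
``config.fuse.pretrained``, presumably loaded from the YAML files under
``config/`` by the entry scripts)::

    fuse = Fuse(config, mode='inference')
    # ir, vi: (B, 1, H, W) tensors on the same device as the generator;
    # inference() runs the generator under torch.inference_mode() and
    # returns the fused (B, 1, H, W) image
    fus = fuse.inference(ir, vi)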
""" def __init__(self, config, mode: Literal['train', 'inference']): # attach hyper parameters self.config = config self.mode = mode # fuse computation mode: train(grad+graph), eval(graph), inference(x) modules = [] # init device device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') logging.info(f'deploy tardal-fuse on device {str(device)}') self.device = device # init tardal generator f_dim, f_depth = config.fuse.dim, config.fuse.depth generator = Generator(dim=f_dim, depth=f_depth) modules.append(generator) logging.info(f'init generator with (dim: {f_dim} depth: {f_depth})') self.generator = generator # init tardel discriminator during train mode if mode == 'train': f_size = config.train.image_size dis_t = Discriminator(dim=f_dim, size=f_size) dis_d = Discriminator(dim=f_dim, size=f_size) modules += [dis_t, dis_d] logging.info(f'init discriminators with (dim: {f_dim} size: {f_size})') self.dis_t, self.dis_d = dis_t, dis_d # load pretrained parameters (optional) f_ckpt = config.fuse.pretrained if f_ckpt is not None: if 'http' in f_ckpt: ckpt_p = Path.cwd() / 'weights' / 'v1' / 'tardal.pth' url = f_ckpt logging.info(f'download pretrained parameters from {url}') try: ckpt = torch.hub.load_state_dict_from_url(url, model_dir=ckpt_p.parent, map_location='cpu') except Exception as err: logging.fatal(f'connect to {url} failed: {err}, try download pretrained weights manually') sys.exit(1) else: ckpt = torch.load(f_ckpt, map_location='cpu') self.load_ckpt(ckpt) # criterion if config.loss.fuse.src_fn == 'v1': ms_ssim_loss = MS_SSIMLoss() modules.append(ms_ssim_loss) self.ms_ssim_loss = ms_ssim_loss # move to device _ = [x.to(device) for x in modules] # more parameters # WGAN div hyper parameters self.wk, self.wp = 2, 6 def load_ckpt(self, ckpt: dict): f_ckpt = ckpt if 'fuse' not in ckpt else ckpt['fuse'] # check eval mode if self.config.inference.use_eval is None: if 'use_eval' in f_ckpt: logging.warning(f'overwriting inference.use_eval {self.config.inference.use_eval} with {f_ckpt["use_eval"]}') self.config.inference.use_eval = f_ckpt['use_eval'] else: logging.warning(f'no use_eval settings found, using default (true)') self.config.inference.use_eval = True if 'use_eval' in f_ckpt: f_ckpt.pop('use_eval') # load state dict self.generator.load_state_dict(f_ckpt) if self.mode == 'train' and 'disc' in ckpt: self.dis_t.load_state_dict(ckpt['disc']['t']) self.dis_d.load_state_dict(ckpt['disc']['d']) def save_ckpt(self) -> dict: ckpt = {'fuse': self.generator.state_dict()} if self.mode == 'train': ckpt |= {'disc': {'t': self.dis_t.state_dict(), 'd': self.dis_t.state_dict()}} return ckpt def forward(self, ir: Tensor, vi: Tensor) -> Tensor: self.generator.train() fus = self.generator(ir, vi) return fus @torch.no_grad() def eval(self, ir: Tensor, vi: Tensor) -> Tensor: self.generator.eval() fus = self.generator(ir, vi) return fus @torch.inference_mode() def inference(self, ir: Tensor, vi: Tensor) -> Tensor: if self.config.inference.use_eval: self.generator.eval() fus = self.generator(ir, vi) return fus def criterion_dis_t(self, ir: Tensor, vi: Tensor, mk: Tensor) -> Tensor: """ criterion on target discriminator 'ir * m <- pixel distribution -> fus * m' """ logging.debug('criterion on target discriminator') # switch to train mode self.dis_t.train() # sample real & fake real_s = ir * mk fake_s = self.eval(ir, vi) * mk fake_s.detach_() # judge value towards real & fake real_v = torch.squeeze(self.dis_t(real_s)) fake_v = torch.squeeze(self.dis_t(fake_s)) # loss calculate real_l, fake_l = 
-real_v.mean(), fake_v.mean() div = div_loss(self.dis_t, real_s, fake_s, self.wp) loss = real_l + fake_l + self.wk * div return loss def criterion_dis_d(self, ir: Tensor, vi: Tensor, mk: Tensor) -> Tensor: """ criterion on detail discriminator 'vi * m <- grad distribution -> fus * (1-m)' mask: optional """ logging.debug('criterion on detail discriminator') # switch to train mode self.dis_d.train() # sample real & fake mk = mk if self.config.loss.fuse.d_mask else 0 # use mask or not real_s = self.gradient(vi) * (1 - mk) fake_s = self.gradient(self.eval(ir, vi)) * (1 - mk) fake_s.detach_() # judge value towards real & fake real_v = torch.squeeze(self.dis_d(real_s)) fake_v = torch.squeeze(self.dis_d(fake_s)) # loss calculate real_l, fake_l = -real_v.mean(), fake_v.mean() div = div_loss(self.dis_d, real_s, fake_s, self.wp) loss = real_l + fake_l + self.wk * div return loss def criterion_generator(self, ir: Tensor, vi: Tensor, mk: Tensor, w1: Tensor, w2: Tensor, d_warming: bool = True): """ criterion on generator 'ir, vi <- loss -> fus' return: Tuple[Tensor, List[number]] (only fuse), Tuple[Tensor, Tensor, List[number]] (joint mode) """ logging.debug('criterion on generator') # forward (train mode for calculate loss) fus = self.forward(ir, vi) # calculate src and adv loss f_loss = self.config.loss.fuse src_w, adv_w = f_loss.src, f_loss.adv adv_w = 0 if d_warming else adv_w src_l = w1 * self.src_loss(fus, ir) + w2 * self.src_loss(fus, vi) adv_l, tar_l, det_l = self.adv_loss(fus, mk) loss = src_w * src_l.mean() + adv_w * adv_l.mean() # only fuse return loss, [src_l.mean().item(), adv_l.mean().item(), tar_l, det_l] @staticmethod def gradient(x: Tensor, eps: float = 1e-8) -> Tensor: s = spatial_gradient(x, 'sobel') dx, dy = s[:, :, 0, :, :], s[:, :, 1, :, :] u = torch.sqrt(torch.pow(dx, 2) + torch.pow(dy, 2) + eps) # sqrt backwork x range: (0, n] return u def src_loss(self, x: Tensor, y: Tensor) -> Tensor: src_fn = self.config.loss.fuse.src_fn match src_fn: case 'v0': "fus <- 0.01*ssim + 0.99*l1 -> src" return 0.01 * ssim_loss(x, y, window_size=11) + 0.99 * l1_loss(x, y) case 'v1': "fus <- ms-ssim -> src" return self.ms_ssim_loss(x, y) case _: assert NotImplemented, f'unsupported src function: {src_fn}' def adv_loss(self, fus: Tensor, mk: Tensor) -> Tuple[Tensor, number, number]: # weights f_loss = self.config.loss.fuse tar_w, det_w = f_loss.t_adv, f_loss.d_adv # target loss self.dis_t.eval() tar_l = -self.dis_t(fus * mk) # fus * m -> target pixel distribution (max -> -min) # detail loss self.dis_d.eval() mk = mk if self.config.loss.fuse.d_mask else 0 # use mask or not det_l = -self.dis_d(self.gradient(fus) * (1 - mk)) # grad(fus) * (1-m) -> grad distribution (max -> -min) return tar_w * tar_l + det_w * det_l, tar_l.mean().item(), det_l.mean().item() def param_groups(self, key: Optional[Literal['g', 'd']] = None) -> tuple[List, List, List]: match key: case 'g': return self.g_params() case 'd': return self.d_params() case _: g_params, d_params = self.g_params(), self.d_params() group = [], [], [] for idx in range(3): group[idx].extend(g_params[idx]) group[idx].extend(d_params[idx]) return group def g_params(self) -> tuple[List, List, List]: return get_param_groups(self.generator) def d_params(self) -> tuple[List, List, List]: group = [], [], [] for module in [self.dis_t, self.dis_d]: tmp = get_param_groups(module) for idx in range(3): group[idx].extend(tmp[idx]) return group ================================================ FILE: pipeline/iqa.py ================================================ import 
logging import socket import sys from pathlib import Path from typing import Literal import cv2 import torch.cuda from kornia import image_to_tensor, tensor_to_image from torch import Tensor from torchvision.models import vgg16 from torchvision.transforms import Compose, Resize, Normalize from tqdm import tqdm class IQA: r""" Init information measurement pipeline to generate iqa from source images. """ def __init__(self, url: str): # init device device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') logging.info(f'deploy iqa on device {str(device)}') self.device = device # init vgg backbone extractor = vgg16().features logging.info(f'init iqa extractor with (3 -> 1)') self.extractor = extractor # download pretrained parameters ckpt_p = Path.cwd() / 'weights' / 'v1' / 'iqa.pth' logging.info(f'download pretrained iqa weights from {url}') socket.setdefaulttimeout(5) try: logging.info(f'starting download of pretrained weights from {url}') ckpt = torch.hub.load_state_dict_from_url(url, model_dir=ckpt_p.parent, map_location='cpu') except Exception as err: logging.fatal(f'load {url} failed: {err}, try download pretrained weights manually') sys.exit(1) extractor.load_state_dict(ckpt) logging.info(f'load pretrained iqa weights from {str(ckpt_p)}') # move to device extractor.to(device) # more parameters self.transform_fn = Compose([Resize((672, 672)), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) self.upsample = Resize((672, 672)) @torch.inference_mode() def inference(self, src: str | Path, dst: str | Path): self.modality_inference(src, dst, 'ir') self.modality_inference(src, dst, 'vi') @torch.inference_mode() def modality_inference(self, src: str | Path, dst: str | Path, modality: Literal['ir', 'vi']): # create save folder dst = Path(dst / modality) dst.mkdir(parents=True, exist_ok=True) logging.debug(f'create save folder {str(dst)}') # forward self.extractor.eval() img_list = sorted(Path(src / modality).rglob('*.png')) logging.info(f'load {len(img_list)} images from {str(src)}') process = tqdm(img_list) for img_p in process: process.set_description(f'generate iqa for {img_p.name} to {str(dst)}') img = self._imread(img_p).to(self.device) reverse_fn = Resize(size=img.shape[-2:]) iqa = self.extractor_inference(img.unsqueeze(0))[0] iqa = reverse_fn(iqa).squeeze() cv2.imwrite(str(dst / img_p.name), tensor_to_image(iqa) * 255) @torch.inference_mode() def extractor_inference(self, x: Tensor) -> Tensor: # information measurement l_ids = [3, 8, 15, 22, 29] # layers before max-pooling f = [] x = x.repeat(1, 3, 1, 1) if x.size(dim=1) == 1 else x x = self.transform_fn(x) for index, layer in enumerate(self.extractor): x = layer(x) if index in l_ids: t = x.mean(axis=1, keepdims=True) f.append(self.upsample(t)) f = torch.cat(f, dim=1).mean(axis=1, keepdims=True) return f @staticmethod def _imread(img_p: str | Path): img = cv2.imread(str(img_p), cv2.IMREAD_GRAYSCALE) img = image_to_tensor(img).float() / 255 return img ================================================ FILE: pipeline/saliency.py ================================================ import logging import socket import sys import warnings from pathlib import Path import cv2 import torch.hub from kornia import image_to_tensor, tensor_to_image from torchvision.transforms import Resize, Compose, Normalize from tqdm import tqdm from module.saliency.u2net import U2NETP class Saliency: r""" Init saliency detection pipeline to generate mask from infrared images. 
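The information measurement above averages VGG-16 activations taken just before each of the five max-pooling layers, upsamples them to a common size, and fuses them into a single-channel map. A minimal sketch of that aggregation, where a randomly initialised vgg16 stands in for the pretrained iqa.pth weights and the input is a hypothetical tensor:

import torch
from torchvision.models import vgg16
from torchvision.transforms import Resize

extractor = vgg16().features.eval()   # the pipeline loads dedicated iqa weights; random weights here
upsample = Resize((672, 672))
l_ids = [3, 8, 15, 22, 29]            # last activation index before each max-pooling block

x = torch.rand(1, 3, 672, 672)        # hypothetical pre-processed (resized, normalised) input
feats = []
with torch.inference_mode():
    for idx, layer in enumerate(extractor):
        x = layer(x)
        if idx in l_ids:
            feats.append(upsample(x.mean(dim=1, keepdim=True)))  # channel-mean at this level
    iqa = torch.cat(feats, dim=1).mean(dim=1, keepdim=True)      # fuse the five levels into one map
print(iqa.shape)  # torch.Size([1, 1, 672, 672])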
""" def __init__(self, url: str): # init device device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') logging.info(f'deploy u2net on device {str(device)}') self.device = device # init u2net small (u2netp) net = U2NETP(in_ch=1, out_ch=1) logging.info(f'init u2net small model with (1 -> 1)') self.net = net # download pretrained parameters ckpt_p = Path.cwd() / 'weights' / 'v1' / 'u2netp.pth' logging.info(f'download pretrained u2net weights from {url}') socket.setdefaulttimeout(5) try: logging.info(f'starting download of pretrained weights from {url}') ckpt = torch.hub.load_state_dict_from_url(url, model_dir=ckpt_p.parent, map_location='cpu') except Exception as err: logging.fatal(f'load {url} failed: {err}, try download pretrained weights manually') sys.exit(1) net.load_state_dict(ckpt) logging.info(f'load pretrained u2net weights from {str(ckpt_p)}') # move to device net.to(device) # more parameters self.transform_fn = Compose([Resize(size=(320, 320)), Normalize(mean=0.485, std=0.229)]) @torch.inference_mode() def inference(self, src: str | Path, dst: str | Path): # create save folder dst = Path(dst) dst.mkdir(parents=True, exist_ok=True) logging.debug(f'create save folder {str(dst)}') # forward self.net.eval() warnings.filterwarnings(action='ignore', lineno=780) img_list = sorted(Path(src).rglob('*.png')) logging.info(f'load {len(img_list)} images from {str(src)}') process = tqdm(img_list) for img_p in process: process.set_description(f'generate mask for {img_p.name} to {str(dst)}') img = self._imread(img_p).to(self.device) reverse_fn = Resize(size=img.shape[-2:]) img = self.transform_fn(img) mask = self.net(img.unsqueeze(0))[0] mask = (mask - mask.min()) / (mask.max() - mask.min()) mask = reverse_fn(mask).squeeze() cv2.imwrite(str(dst / img_p.name), tensor_to_image(mask) * 255) @staticmethod def _imread(img_p: str | Path): img = cv2.imread(str(img_p), cv2.IMREAD_GRAYSCALE) img = image_to_tensor(img).float() / 255 return img ================================================ FILE: pipeline/train.py ================================================ import logging from functools import reduce from pathlib import Path import torch import wandb from kornia.filters import SpatialGradient from kornia.losses import SSIMLoss from kornia.metrics import AverageMeter from torch import nn, Tensor from torch.optim import RMSprop from torch.utils.data import DataLoader from torchvision import transforms from tqdm import tqdm from functions.div_loss import div_loss from modules.discriminator import Discriminator from modules.generator import Generator from utils.environment_probe import EnvironmentProbe from utils.fusion_data import FusionData class Train: """ The train process for TarDAL. 
""" def __init__(self, environment_probe: EnvironmentProbe, config: dict): logging.info(f'TarDAL Training | mask: {config.mask} | weight: {config.weight} | adv: {config.adv_weight}') self.config = config self.environment_probe = environment_probe # modules logging.info(f'generator | dim: {config.dim} | depth: {config.depth}') self.generator = Generator(config.dim, config.depth) logging.info(f'discriminator | dim: {config.dim} | size: {config.size}') self.dis_target = Discriminator(config.dim, (config.size, config.size)) self.dis_detail = Discriminator(config.dim, (config.size, config.size)) # WGAN adam optim logging.info(f'RMSprop | learning rate: {config.learning_rate}') self.opt_generator = RMSprop(self.generator.parameters(), lr=config.learning_rate) self.opt_dis_target = RMSprop(self.dis_target.parameters(), lr=config.learning_rate) self.opt_dis_detail = RMSprop(self.dis_detail.parameters(), lr=config.learning_rate) # move to device logging.info(f'module device: {environment_probe.device}') self.generator.to(environment_probe.device) self.dis_target.to(environment_probe.device) self.dis_detail.to(environment_probe.device) # loss self.l1 = nn.L1Loss(reduction='none') self.ssim = SSIMLoss(window_size=11, reduction='none') self.l1.cuda() self.ssim.cuda() # functions self.spatial = SpatialGradient('diff') # WGAN div hyper parameters self.wk, self.wp = 2, 6 # datasets folder = Path(config.folder) resize = transforms.Resize((config.size, config.size)) dataset = FusionData(folder, config.mask, 'train', transforms=resize) self.dataloader = DataLoader(dataset, config.batch_size, True, num_workers=config.num_workers, pin_memory=True) logging.info(f'dataset | folder: {str(folder)} | size: {len(self.dataloader) * config.batch_size}') def train_dis_target(self, ir: Tensor, vi: Tensor, mk: Tensor) -> Tensor: """ Train target discriminator for 'ir * m <- pixel -> fus * m' """ logging.debug('train target discriminator') # switch to train mode self.dis_target.train() # sample real & fake real_s = ir * mk self.generator.eval() fake_s = self.generator(ir, vi).detach() * mk # judge value towards real & fake real_v = torch.squeeze(self.dis_target(real_s)) fake_v = torch.squeeze(self.dis_target(fake_s)) # loss calculate real_l, fake_l = -real_v.mean(), fake_v.mean() div = div_loss(self.dis_target, real_s, fake_s, self.wp) loss = real_l + fake_l + self.wk * div # backward self.opt_dis_target.zero_grad() loss.backward() self.opt_dis_target.step() return loss.item() def train_dis_detail(self, ir: Tensor, vi: Tensor, mk: Tensor) -> Tensor: """ Train detail discriminator for 'vi * (1-m) <- Grad -> fus * (1-m)' """ logging.debug('train detail discriminator') # switch to train mode self.dis_detail.train() # sample real & fake real_s = self.gradient(vi * (1 - mk)) self.generator.eval() fake_s = self.gradient(self.generator(ir, vi).detach() * (1 - mk)) # judge value towards real & fake real_v = torch.squeeze(self.dis_detail(real_s)) fake_v = torch.squeeze(self.dis_detail(fake_s)) # loss calculate real_l, fake_l = -real_v.mean(), fake_v.mean() div = div_loss(self.dis_detail, real_s, fake_s, self.wp) loss = real_l + fake_l + self.wk * div # backward self.opt_dis_detail.zero_grad() loss.backward() self.opt_dis_detail.step() return loss.item() def gradient(self, x: Tensor, eps: float = 1e-6) -> Tensor: s = self.spatial(x) dx, dy = s[:, :, 0, :, :], s[:, :, 1, :, :] u = torch.sqrt(torch.pow(dx, 2) + torch.pow(dy, 2) + eps) return u def train_generator(self, ir: Tensor, vi: Tensor, mk: Tensor, s1: Tensor, s2: Tensor) -> 
dict: """ Train generator 'ir + vi -> fus' """ logging.debug('train generator') self.generator.train() fus = self.generator(ir, vi) # calculate loss towards criterion b1, b2, b3 = self.config.weight # b1 * ssim + b2 * l1 + b3 * adv l_ir = b1 * self.ssim(fus, ir) + b2 * self.l1(fus, ir) l_vi = b1 * self.ssim(fus, vi) + b2 * self.l1(fus, vi) w1, w2 = 0.5 + 0.5 * (s1 - s2), 0.5 + 0.5 * (s2 - s1) # data driven loss weights l_src = w1 * l_ir + w2 * l_vi # fus <- ssim + l1 -> (ir, vi) l_src = l_src.mean() self.dis_target.eval() l_target = -self.dis_target(fus * mk).mean() # judge target: fus * m self.dis_detail.eval() l_detail = -self.dis_detail(self.gradient(fus * (1 - mk))).mean() # judge detail: Grad(fus * (1-mk)) c1, c2 = self.config.adv_weight # c1 * l_target + c2 * l_detail l_adv = c1 * l_target + c2 * l_detail loss = l_src + b3 * l_adv # backward self.opt_generator.zero_grad() loss.backward() self.opt_generator.step() # loss state state = { 'g_loss': loss.item(), 'g_src_ir': l_ir.mean().item(), 'g_src_vi': l_vi.mean().item(), 'g_adv_target': l_target.item(), 'g_adv_detail': l_detail.item(), } return state def run(self): for epoch in range(1, self.config.epochs + 1): process = tqdm(enumerate(self.dataloader), disable=not self.config.debug) meter = AverageMeter() for idx, sample in process: ir, vi, mk = sample['ir'], sample['vi'], sample['mk'] s1, s2 = sample['vsm']['ir'], sample['vsm']['vi'] im = torch.cat([ir, vi, mk, s1, s2], dim=1) im = im.to(self.environment_probe.device) ir, vi, mk, s1, s2 = torch.chunk(im, 5, dim=1) g_loss = self.train_generator(ir, vi, mk, s1, s2) d_target_loss = self.train_dis_target(ir, vi, mk) d_detail_loss = self.train_dis_detail(ir, vi, mk) process.set_description(f'g: {g_loss["g_loss"]:03f} | d: {d_target_loss:03f}, {d_detail_loss:03f}') meter.update(Tensor(list(g_loss.values()) + [d_target_loss] + [d_detail_loss])) keys = ['g_loss', 'g_src_ir', 'g_src_vi', 'g_adv_t', 'g_adv_d', 'd_t', 'd_d'] state = reduce(lambda x, y: x | y, [{k: v} for k, v in zip(keys, meter.avg)]) print(state) wandb.log(state) if epoch % 5 == 0: self.save(epoch) def save(self, epoch: int): path = Path(self.config.cache) / self.config.id path.mkdir(parents=True, exist_ok=True) cache = path / f'{epoch:03d}.pth' logging.info(f'save checkpoint to {str(cache)}') state = { 'g': self.generator.state_dict(), 'd': { 't': self.dis_target.state_dict(), 'd': self.dis_target.state_dict(), }, 'opt': { 'g': self.opt_generator.state_dict(), 't': self.opt_dis_target.state_dict(), 'd': self.opt_dis_detail.state_dict(), }, } torch.save(state, cache) ================================================ FILE: requirements.txt ================================================ # TarDAL requirements # Usage: pip install -r requirements.txt # Base ---------------------------------------- numpy>=1.20 torch>=1.9 torchvision>=0.10 kornia>=0.6.8 opencv-python>=4.5.5.64 PyYAML>=6.0 pandas>=1.5.1 matplotlib>=3.6.3 # Logging ------------------------------------- wandb>=0.12.11 tqdm>=4.63.0 tabulate>=0.8.9 ================================================ FILE: scripts/__init__.py ================================================ from scripts.infer_f import InferF from scripts.infer_fd import InferFD from scripts.train_f import TrainF from scripts.train_fd import TrainFD __all__ = ['TrainF', 'TrainFD', 'InferF', 'InferFD'] ================================================ FILE: scripts/infer_f.py ================================================ import logging from pathlib import Path import torch import yaml from kornia.color 
import ycbcr_to_rgb from torch.utils.data import DataLoader from tqdm import tqdm import loader from config import ConfigDict, from_dict from pipeline.fuse import Fuse from tools.dict_to_device import dict_to_device class InferF: def __init__(self, config: str | Path | ConfigDict, save_dir: str | Path): # init logger log_f = '%(asctime)s | %(filename)s[line:%(lineno)d] | %(levelname)s | %(message)s' logging.basicConfig(level='INFO', format=log_f) logging.info(f'TarDAL-v1 Inference Script') # init config if isinstance(config, str) or isinstance(config, Path): config = yaml.safe_load(Path(config).open('r')) config = from_dict(config) # convert dict to object else: config = config self.config = config # debug mode if config.debug.fast_run: logging.warning('fast run mode is on, only for debug!') # create save(output) folder save_dir = Path(save_dir) save_dir.mkdir(parents=True, exist_ok=True) logging.info(f'create save folder {str(save_dir)}') self.save_dir = save_dir # init dataset & dataloader data_t = getattr(loader, config.dataset.name) # dataset type self.data_t = data_t p_dataset = data_t(root=config.dataset.root, mode='pred', config=config) self.p_loader = DataLoader( p_dataset, batch_size=config.inference.batch_size, shuffle=False, collate_fn=data_t.collate_fn, pin_memory=True, num_workers=config.inference.num_workers, ) # init pipeline fuse = Fuse(config, mode='inference') self.fuse = fuse @torch.inference_mode() def run(self): p_l = tqdm(self.p_loader, total=len(self.p_loader), ncols=120) for sample in p_l: sample = dict_to_device(sample, self.fuse.device) # f_net forward fus = self.fuse.inference(ir=sample['ir'], vi=sample['vi']) # recolor if self.data_t.color and self.config.inference.grayscale is False: fus = torch.cat([fus, sample['cbcr']], dim=1) fus = ycbcr_to_rgb(fus) # save images self.data_t.pred_save( fus, [self.save_dir / name for name in sample['name']], shape=sample['shape'] ) ================================================ FILE: scripts/infer_fd.py ================================================ import logging from pathlib import Path import torch import yaml from kornia.color import ycbcr_to_rgb from torch.utils.data import DataLoader from tqdm import tqdm import loader from config import ConfigDict, from_dict from pipeline.detect import Detect from pipeline.fuse import Fuse from tools.dict_to_device import dict_to_device class InferFD: def __init__(self, config: str | Path | ConfigDict, save_dir: str | Path): # init logger log_f = '%(asctime)s | %(filename)s[line:%(lineno)d] | %(levelname)s | %(message)s' logging.basicConfig(level='INFO', format=log_f) logging.info(f'TarDAL-v1 Inference Script') # init config if isinstance(config, str) or isinstance(config, Path): config = yaml.safe_load(Path(config).open('r')) config = from_dict(config) # convert dict to object else: config = config self.config = config # debug mode if config.debug.fast_run: logging.warning('fast run mode is on, only for debug!') # save label as txt warning if config.inference.save_txt: logging.warning('labels will be saved as txt, this will slow down the inference speed!') # create save(output) folder save_dir = Path(save_dir) save_dir.mkdir(parents=True, exist_ok=True) (save_dir / 'images').mkdir(exist_ok=True) (save_dir / 'labels').mkdir(exist_ok=True) logging.info(f'create save folder {str(save_dir)}') self.save_dir = save_dir # init dataset & dataloader data_t = getattr(loader, config.dataset.name) # dataset type self.data_t = data_t p_dataset = data_t(root=config.dataset.root, mode='pred', 
config=config) self.p_loader = DataLoader( p_dataset, batch_size=config.inference.batch_size, shuffle=False, collate_fn=data_t.collate_fn, pin_memory=True, num_workers=config.inference.num_workers, ) # init pipeline fuse = Fuse(config, mode='inference') self.fuse = fuse detect = Detect(config, mode='inference', nc=len(p_dataset.classes), classes=p_dataset.classes, labels=p_dataset.labels) self.detect = detect @torch.inference_mode() def run(self): p_l = tqdm(self.p_loader, total=len(self.p_loader), ncols=80) for sample in p_l: sample = dict_to_device(sample, self.fuse.device) # set description p_l.set_description(f'infer {sample["name"][0]} ({len(sample["name"])} images)') # f_net forward fus = self.fuse.inference(ir=sample['ir'], vi=sample['vi']) # recolor if self.data_t.color and self.config.inference.grayscale is False: fus = torch.cat([fus, sample['cbcr']], dim=1) fus = ycbcr_to_rgb(fus) # d_net forward pred = self.detect.inference(fus) # save images self.data_t.pred_save( fus, [self.save_dir / name for name in sample['name']], shape=sample['shape'], pred=pred, save_txt=self.config.inference.save_txt, ) ================================================ FILE: scripts/train_f.py ================================================ import argparse import logging from functools import reduce from pathlib import Path import torch import wandb import yaml from kornia.metrics import AverageMeter from torch import Tensor from torch.optim import AdamW, Adam, SGD from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from tqdm import tqdm import loader from config import from_dict, ConfigDict from pipeline.fuse import Fuse from tools.dict_to_device import dict_to_device class TrainF: def __init__(self, config: str | Path | ConfigDict, wandb_key: str): # init logger log_f = '%(asctime)s | %(filename)s[line:%(lineno)d] | %(levelname)s | %(message)s' logging.basicConfig(level='INFO', format=log_f) logging.info(f'TarDAL-v1 Training Script') # init config if isinstance(config, str) or isinstance(config, Path): config = yaml.safe_load(Path(config).open('r')) config = from_dict(config) # convert dict to object else: config = config self.config = config # debug mode if config.debug.fast_run: logging.warning('fast run mode is on, only for debug!') # wandb run wandb.login(key=wandb_key) # wandb api key runs = wandb.init(project='TarDAL-v1', config=config, mode=config.debug.wandb_mode) self.runs = runs # init save folder save_dir = Path(self.config.save_dir) / self.runs.id save_dir.mkdir(parents=True, exist_ok=True) self.save_dir = save_dir logging.info(f'model weights will be saved to {str(save_dir)}') # init pipeline fuse = Fuse(config, mode='train') self.fuse = fuse # freeze & grad for k, v in fuse.generator.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in config.train.freeze): logging.info(f'freezing {k}') v.requires_grad = False # init optimizer o_cfg = config.optimizer fuse_pg = fuse.param_groups() # [weight(with decay), weight(no decay), bias] groups = [ {'params': fuse_pg[0], 'lr': o_cfg.lr_i, 'weight_decay': o_cfg.weight_decay}, {'params': fuse_pg[1], 'lr': o_cfg.lr_i, 'weight_decay': 0}, ] match o_cfg.name: case 'sgd': optimizer = SGD(fuse_pg[2], lr=o_cfg.lr_i, momentum=o_cfg.momentum, nesterov=True) case 'adam': optimizer = Adam(fuse_pg[2], lr=o_cfg.lr_i, betas=(o_cfg.momentum, 0.999)) case 'adamw': optimizer = AdamW(fuse_pg[2], lr=o_cfg.lr_i, betas=(o_cfg.momentum, 0.999), weight_decay=0) case _: optimizer = None assert 
NotImplemented, f'unsupported optimizer: {o_cfg.name}' self.optimizer = optimizer self.optimizer.add_param_group(groups[0]) self.optimizer.add_param_group(groups[1]) # init scheduler lr_fn = lambda x: (1 - x / config.train.epochs) * (1 - o_cfg.lr_f) + o_cfg.lr_f self.scheduler = LambdaLR(self.optimizer, lr_lambda=lr_fn) # init dataset & dataloader data_t = getattr(loader, config.dataset.name) # dataset type t_dataset = data_t(root=config.dataset.root, mode='train', config=config) v_dataset = data_t(root=config.dataset.root, mode='val', config=config) self.t_loader = DataLoader( t_dataset, batch_size=config.train.batch_size, shuffle=True, collate_fn=data_t.collate_fn, pin_memory=True, num_workers=config.train.num_workers, ) self.v_loader = DataLoader( v_dataset, batch_size=config.train.batch_size, collate_fn=data_t.collate_fn, pin_memory=True, num_workers=config.train.num_workers, ) def run(self): # epochs & eval interval & save interval epochs = self.config.train.epochs e_interval = self.config.train.eval_interval s_interval = self.config.train.save_interval # start training process for epoch in range(1, epochs + 1): # train t_l = tqdm(self.t_loader, disable=False, total=len(self.t_loader) if not self.config.debug.fast_run else 3, ncols=120) g_history = [AverageMeter() for _ in range(5)] # tot, src, adv, tar, det disc_history = AverageMeter(), AverageMeter() # target, detail log_dict = {} for sample in t_l: sample = dict_to_device(sample, self.fuse.device) # train generator g_loss, [src_l, adv_l, tar_l, det_l] = self.fuse.criterion_generator( ir=sample['ir'], vi=sample['vi'], mk=sample['mask'], w1=sample['ir_w'], w2=sample['vi_w'], d_warming=epoch <= self.config.loss.fuse.d_warm, ) g_history[0].update(g_loss.item()) _ = [g_history[idx + 1].update(v) for idx, v in enumerate([src_l, adv_l, tar_l, det_l])] self.optim(g_loss) # train target discriminator d_t_loss = self.fuse.criterion_dis_t( ir=sample['ir'], vi=sample['vi'], mk=sample['mask'], ) disc_history[0].update(d_t_loss.item()) self.optim(d_t_loss) # train detail discriminator d_d_loss = self.fuse.criterion_dis_d( ir=sample['ir'], vi=sample['vi'], mk=sample['mask'], ) disc_history[1].update(d_d_loss.item()) self.optim(d_d_loss) # fast run (jump out) if self.config.debug.fast_run and t_l.n > 2: logging.info('fast mode: jump') break # train logs g_l, src_l, adv_l, tar_l, det_l = [g_history[i].avg for i in range(5)] d_t_l, d_d_l = disc_history[0].avg, disc_history[1].avg log_dict |= {'g/tot': g_l, 'g/src': src_l, 'g/adv': adv_l, 'g/tar': d_t_l, 'g/det': d_d_l, 'disc/tar': tar_l, 'disc/det': det_l} logging.info(f'Epoch {epoch}/{epochs} | Generator Loss: {g_l:.4f} | Source Loss: {src_l:.4f} | Adversarial Loss: {adv_l:.4f}') # eval (fuse: show in wandb) if epoch % e_interval == 0 or self.config.debug.fast_run: e_l = tqdm(self.v_loader, disable=True) for sample in e_l: sample = dict_to_device(sample, self.fuse.device) fus = self.fuse.eval(ir=sample['ir'], vi=sample['vi']) log_dict |= {'fuse': wandb.Image(fus), 'mask': wandb.Image(sample['mask'])} break # update scheduler and show lr log_dict |= reduce(lambda x, y: x | y, [{f'lr_{i}': v['lr']} for i, v in enumerate(self.optimizer.param_groups)]) self.scheduler.step() # update wandb self.runs.log(log_dict) # save model if epoch % s_interval == 0 or self.config.debug.fast_run: ckpt = self.fuse.save_ckpt() torch.save(ckpt, self.save_dir / f'{str(epoch).zfill(5)}.pth') logging.info(f'Epoch {epoch}/{epochs} | Model Saved') def optim(self, loss: Tensor): self.optimizer.zero_grad() loss.backward() 
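TrainF (and smart_optimizer further down) builds its optimizer from the three parameter groups returned by param_groups(): decayed weights, non-decayed weights, and biases, each added with its own learning-rate and weight-decay settings. A minimal sketch of that construction on a hypothetical module; the split below uses a common heuristic (norm-layer weights skip decay) as a stand-in for the repo's get_param_groups, and the hyper-parameters are illustrative.

import torch
from torch import nn
from torch.optim import SGD

net = nn.Sequential(nn.Conv2d(1, 8, 3, padding=1), nn.BatchNorm2d(8), nn.Conv2d(8, 1, 3, padding=1))

# split into [weights with decay, weights without decay, biases]; heuristic stand-in for get_param_groups
decay, no_decay, biases = [], [], []
for module in net.modules():
    for name, p in module.named_parameters(recurse=False):
        if name == 'bias':
            biases.append(p)
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm, nn.LayerNorm)):
            no_decay.append(p)
        else:
            decay.append(p)

lr_i, momentum, weight_decay = 1e-3, 0.9, 5e-4        # hypothetical values; real ones come from config.optimizer
opt = SGD(biases, lr=lr_i, momentum=momentum, nesterov=True)
opt.add_param_group({'params': decay, 'lr': lr_i, 'weight_decay': weight_decay})
opt.add_param_group({'params': no_decay, 'lr': lr_i, 'weight_decay': 0})
print([len(g['params']) for g in opt.param_groups])   # biases first, then the two weight groups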
self.optimizer.step() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--cfg', default='config/default.yaml', help='config file path') parser.add_argument('--auth', help='wandb auth api key') args = parser.parse_args() train = TrainF(args.cfg, args.auth) train.run() ================================================ FILE: scripts/train_fd.py ================================================ import argparse import logging import sys from functools import reduce from itertools import chain from pathlib import Path import numpy import torch import wandb import yaml from kornia.color import ycbcr_to_rgb from kornia.metrics import AverageMeter from torch.nn.utils import clip_grad_norm_ from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from tqdm import tqdm import loader from config import from_dict, ConfigDict from module.detect.utils.metrics import ap_per_class from pipeline.detect import Detect from pipeline.fuse import Fuse from scripts.utils.smart_optimizer import smart_optimizer from tools.dict_to_device import dict_to_device class TrainFD: def __init__(self, config: str | Path | ConfigDict, wandb_key: str): # init logger log_f = '%(asctime)s | %(filename)s[line:%(lineno)d] | %(levelname)s | %(message)s' logging.basicConfig(level='INFO', format=log_f) logging.info(f'TarDAL-v1 Training Script') # init config if isinstance(config, str) or isinstance(config, Path): config = yaml.safe_load(Path(config).open('r')) config = from_dict(config) # convert dict to object else: config = config self.config = config # debug mode if config.debug.fast_run: logging.warning('fast run mode is on, only for debug!') # wandb run wandb.login(key=wandb_key) # wandb api key runs = wandb.init(project='TarDAL-v1', config=config, mode=config.debug.wandb_mode) self.runs = runs # init save folder save_dir = Path(config.save_dir) / runs.id save_dir.mkdir(parents=True, exist_ok=True) self.save_dir = save_dir logging.info(f'model weights will be saved to {str(save_dir)}') # load dataset data_t = getattr(loader, config.dataset.name) self.data_t = data_t t_dataset = data_t(root=config.dataset.root, mode='train', config=config) v_dataset = data_t(root=config.dataset.root, mode='val', config=config) if 'detect' not in t_dataset.type: logging.fatal(f'dataset {config.dataset.name} not support detect') sys.exit(1) self.t_loader = DataLoader( t_dataset, batch_size=config.train.batch_size, shuffle=True, collate_fn=data_t.collate_fn, pin_memory=True, num_workers=config.train.num_workers, ) self.v_loader = DataLoader( v_dataset, batch_size=config.train.batch_size, collate_fn=data_t.collate_fn, pin_memory=True, num_workers=config.train.num_workers, ) # init pipeline fuse = Fuse(config, mode='train') self.fuse = fuse detect = Detect(config, mode='train', nc=len(t_dataset.classes), classes=t_dataset.classes, labels=t_dataset.labels) self.detect = detect # freeze & grad for k, v in chain(fuse.generator.named_parameters(), detect.net.named_parameters()): v.requires_grad = True # train all layers if any(x in k for x in config.train.freeze): logging.info(f'freezing {k}') v.requires_grad = False # init fuse optimizer o_cfg = config.optimizer f_p, d_p = fuse.param_groups('g'), detect.param_groups() self.fd_opt = smart_optimizer(o_cfg, tuple(f_p[i] + d_p[i] for i in range(3))) self.disc_opt = smart_optimizer(o_cfg, fuse.param_groups('d'), lr=o_cfg.lr_d) # init scheduler lr_fn = lambda x: (1 - x / config.train.epochs) * (1 - o_cfg.lr_f) + o_cfg.lr_f self.lr_fn = lr_fn 
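Both training scripts decay the learning rate with the same lambda: a linear ramp of the multiplier from 1.0 down to the final factor lr_f over the configured number of epochs, applied through LambdaLR. A small sketch of that schedule with hypothetical values for the epoch count, initial rate, and lr_f:

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR

epochs, lr_i, lr_f = 10, 1e-3, 0.2                      # hypothetical: 10 epochs, final factor 0.2
params = [torch.nn.Parameter(torch.zeros(1))]
opt = SGD(params, lr=lr_i)

lr_fn = lambda x: (1 - x / epochs) * (1 - lr_f) + lr_f  # multiplier: 1.0 at epoch 0, lr_f at the last epoch
scheduler = LambdaLR(opt, lr_lambda=lr_fn)

for epoch in range(epochs):
    # ... one epoch of optimisation would run here ...
    scheduler.step()
    print(f'epoch {epoch + 1}: lr = {opt.param_groups[0]["lr"]:.6f}')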
self.scheduler = LambdaLR(self.fd_opt, lr_lambda=lr_fn) # hyperparameters check # bridge warm & scheduler warm phase.0 if config.loss.bridge.warm != config.scheduler.warmup_epochs[0]: logging.warning(f'overwriting bridge warm {config.loss.bridge.warm} with {config.scheduler.warmup_epochs[0]}') config.loss.bridge.warm = config.scheduler.warmup_epochs[0] # discriminator warm & bridge warm if config.loss.fuse.d_warm >= config.loss.bridge.warm / 2: logging.warning(f'overwriting discriminator warm {config.loss.fuse.d_warm} with {round(config.loss.bridge.warm / 2)}') config.loss.fuse.d_warm = round(config.loss.bridge.warm / 2) def run(self): # epochs & eval interval & save interval epochs = self.config.train.epochs e_interval = self.config.train.eval_interval s_interval = self.config.train.save_interval # history switch best_map = -1 # start training process l_opt_shot = -1 n_batch_size = 64 accumulate = max(round(n_batch_size / self.config.train.batch_size), 1) for epoch in range(1, epochs + 1): # train t_l = tqdm(self.t_loader, disable=False, total=len(self.t_loader) if not self.config.debug.fast_run else 3, ncols=120) # recorder g_history = AverageMeter() # generator total loss f_history = [AverageMeter() for _ in range(5)] # fuse loss: tot, src, adv, tar, det d_history = [AverageMeter() for _ in range(4)] # detect loss: tot, box, obj, cls disc_history = AverageMeter(), AverageMeter() # discriminator loss: target, detail log_dict = {} # warm up shots, max(warmup_epochs, 100 shots) w_config = self.config.scheduler w_shots_0 = max(round(w_config.warmup_epochs[0] * len(self.t_loader)), 100) # bridge warm w_shots_1 = max(round(w_config.warmup_epochs[1] * len(self.t_loader)), 100) # normal warm w_shots = (w_shots_0, w_shots_1) # process self.fd_opt.zero_grad() for idx, sample in enumerate(t_l): # warm up c_shots = idx + len(self.t_loader) * (epoch - 1) if c_shots < w_shots[0]: for jdx, x in enumerate(self.fd_opt.param_groups): x['lr'] = w_config.warmup_bias_lr if jdx == 0 else 0 if 'momentum' in x: x['momentum'] = w_config.warmup_momentum if w_shots[0] <= c_shots < w_shots[1]: x_shot = [c_shots, w_shots[1] + c_shots] # accumulate = max(1, numpy.interp(c_shots, x_shot, [1, n_batch_size / self.config.train.batch_size]).round()) for jdx, x in enumerate(self.fd_opt.param_groups): o_config = self.config.optimizer # bias lr falls from 0.1 to lr_i, all other lrs rise from 0.0 to lr_i w_range = [w_config.warmup_bias_lr if jdx == 0 else 0, x['initial_lr'] * self.lr_fn(epoch - 1)] x['lr'] = numpy.interp(c_shots, x_shot, w_range) if 'momentum' in x: x['momentum'] = numpy.interp(c_shots, x_shot, [w_config.warmup_momentum, o_config.momentum]) lr_s = [x['lr'] for x in self.fd_opt.param_groups] logging.debug(f'adjust lr {lr_s[0]:.6f} {lr_s[1]:.6f} {lr_s[2]:.6f}') # forward sample = dict_to_device(sample, self.fuse.device) # train generator # ir & vi -> f_net -> fus -> d_net -> obj # loss: fus -> src + adv, obj -> ground truth # f_net forward and cal loss f_loss, [src_l, adv_l, tar_l, det_l] = self.fuse.criterion_generator( ir=sample['ir'], vi=sample['vi'], mk=sample['mask'], w1=sample['ir_w'], w2=sample['vi_w'], d_warming=epoch <= self.config.loss.fuse.d_warm, ) fus = self.fuse.eval(ir=sample['ir'], vi=sample['vi']) if epoch <= self.config.loss.bridge.warm: fus.detach_() # (det -> det, fuse -> fuse, det no-> fuse) # recolor if self.data_t.color: fus = torch.cat([fus, sample['cbcr']], dim=1) fus = ycbcr_to_rgb(fus) # d_net forward and cal loss d_loss, [box_l, obj_l, cls_l] = self.detect.criterion( imgs=fus, 
targets=sample['labels'], ) # merge loss b_c = self.config.loss.bridge g_loss = b_c['fuse'] * f_loss + b_c['detect'] * d_loss # generator total loss g_history.update(g_loss.item()) _ = [f_history[idx].update(v) for idx, v in enumerate([f_loss.item(), src_l, adv_l, tar_l, det_l])] _ = [d_history[idx].update(v) for idx, v in enumerate([d_loss.item(), box_l, obj_l, cls_l])] # optimize g_loss.backward() if c_shots - l_opt_shot >= accumulate: clip_grad_norm_(chain(self.fuse.generator.parameters(), self.detect.net.parameters()), max_norm=10.0) self.fd_opt.step() self.fd_opt.zero_grad() l_opt_shot = c_shots logging.debug(f'optimize f+d | shots: {c_shots} | accumulate: {accumulate} | last: {l_opt_shot}') # train target discriminator d_t_loss = self.fuse.criterion_dis_t( ir=sample['ir'], vi=sample['vi'], mk=sample['mask'], ) disc_history[0].update(d_t_loss.item()) self.disc_opt.zero_grad() d_t_loss.backward() self.disc_opt.step() # train detail discriminator d_d_loss = self.fuse.criterion_dis_d( ir=sample['ir'], vi=sample['vi'], mk=sample['mask'], ) disc_history[1].update(d_d_loss.item()) self.disc_opt.zero_grad() d_d_loss.backward() self.disc_opt.step() # update description t_l.set_description(f'{epoch}/{epochs} | g: {g_history.avg:.4f} | f: {f_history[0].avg:.4f} | d: {d_history[0].avg:.4f}') # fast run (jump out) if self.config.debug.fast_run and t_l.n > 2: logging.info('fast mode: jump') break # train logs # fuse loss f_l, src_l, adv_l, tar_l, det_l = [f_history[idx].avg for idx in range(5)] log_dict |= {'fus/tot': f_l, 'fus/src': src_l, 'fus/adv': adv_l, 'fus/tar': tar_l, 'fus/det': det_l} # detect loss d_l, box_l, obj_l, cls_l = [d_history[idx].avg for idx in range(4)] log_dict |= {'det/tot': d_l, 'det/box': box_l, 'det/obj': obj_l, 'det/cls': cls_l} # generator loss g_l = g_history.avg log_dict |= {'gen/tot': g_l, 'gen/fus': f_l, 'gen/det': d_l} # discriminator loss d_t_l, d_d_l = [disc_history[idx].avg for idx in range(2)] log_dict |= {'disc/tar': d_t_l, 'disc/det': d_d_l} # learning rate lrs = [x['lr'] for x in self.fd_opt.param_groups] log_dict |= {'lr/0': lrs[0], 'lr/1': lrs[1], 'lr/2': lrs[2]} # log to console logging.info(f'Epoch {epoch}/{epochs} | Generator Loss: {g_l:.4f} | Fuse loss: {f_l:.4f} | Detect loss: {d_l:.4f}') # update scheduler self.scheduler.step() # eval (fuse & detect: print result in wandb) if epoch % e_interval == 0 or self.config.debug.fast_run: e_l = tqdm(self.v_loader, disable=False, total=len(self.v_loader) if not self.config.debug.fast_run else 3, ncols=120) # matrix seen = 0 dt, p, r, f1, mp, mr, map50, map_all = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 j_dict, stats, ap50, ap, ap_class = [], [], [], [], [] # process for sample in e_l: sample = dict_to_device(sample, self.fuse.device) # f_net fus = self.fuse.eval(ir=sample['ir'], vi=sample['vi']) # recolor if self.data_t.color: fus = torch.cat([fus, sample['cbcr']], dim=1) fus = ycbcr_to_rgb(fus) # d_net seen_x, preview = self.detect.eval(imgs=fus, targets=sample['labels'], stats=stats, preview='detect' not in log_dict) seen += seen_x if preview is not None and 'detect' not in log_dict: log_dict |= {'detect': wandb.Image(preview)} # fast run (jump out) if self.config.debug.fast_run and t_l.n > 2: logging.info('fast mode: jump') break # compute statistics stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] names = reduce(lambda x, y: x | y, [{idx: name} for idx, name in enumerate(self.data_t.classes)]) if len(stats) and stats[0].any(): tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, 
names=names) ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 mp, mr, map50, map_all = p.mean(), r.mean(), ap50.mean(), ap.mean() num_t = numpy.bincount(stats[3].astype(int), minlength=len(self.data_t.classes)) # number of targets per class if num_t.sum() == 0: logging.warning(f'no labels found, can not compute metrics without labels.') # eval logs log_dict |= {'eval/precision': mp, 'eval/recall': mr, 'eval/map50': map50, 'eval/map': map_all} # log to console (per class) logging.info(f'Epoch {epoch}/{epochs} | Precision: {mp:.4f} | Recall: {mr:.4f} | mAP50: {map50:.4f} | mAP: {map_all:.4f}') if len(stats) and len(self.data_t.classes) > 1: for i, c in enumerate(ap_class): logging.info( f'{names[c]} | tot: {num_t[c]} | p: {p[i]:.4f} | r: {r[i]:.4f} | ap50: {ap50[i]:.4f} | ap: {ap[i]:.4f}' ) # mark best if map_all > best_map: best_map = map_all Path(self.save_dir / 'meta.txt').write_text(f'best_map: {best_map:.4f} | epoch: {epoch}') ckpt = self.fuse.save_ckpt() | self.detect.save_ckpt() torch.save(ckpt, self.save_dir / f'{str(epoch).zfill(5)}-{best_map:.4f}.pth') # update wandb self.runs.log(log_dict) # save model if epoch % s_interval == 0 or self.config.debug.fast_run: ckpt = self.fuse.save_ckpt() | self.detect.save_ckpt() torch.save(ckpt, self.save_dir / f'{str(epoch).zfill(5)}.pth') logging.info(f'Epoch {epoch}/{epochs} | Model Saved') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--cfg', default='config/default.yaml', help='config file path') parser.add_argument('--auth', help='wandb auth api key') args = parser.parse_args() train = TrainFD(args.cfg, args.auth) train.run() ================================================ FILE: scripts/utils/smart_optimizer.py ================================================ from typing import Tuple, List, Optional from torch.optim import Optimizer, AdamW, Adam, SGD from config import ConfigDict def smart_optimizer(config: ConfigDict, param_group: Tuple[List, List, List], lr: Optional[float] = None) -> Optimizer: if lr is not None: config.lr_i = lr groups = [ {'params': param_group[0], 'lr': config.lr_i, 'weight_decay': config.weight_decay}, {'params': param_group[1], 'lr': config.lr_i, 'weight_decay': 0}, ] match config.name: case 'sgd': opt = SGD(param_group[2], lr=config.lr_i, momentum=config.momentum, nesterov=True) case 'adam': opt = Adam(param_group[2], lr=config.lr_i, betas=(config.momentum, 0.999)) case 'adamw': opt = AdamW(param_group[2], lr=config.lr_i, betas=(config.momentum, 0.999), weight_decay=0) case _: opt = None assert NotImplemented, f'unsupported optimizer: {config.name}' opt.add_param_group(groups[0]) opt.add_param_group(groups[1]) return opt ================================================ FILE: tools/choose_images.py ================================================ from functools import reduce from pathlib import Path from typing import Literal import cv2 import numpy def choose_images(root: str | Path, mode: str = Literal['train', 'val', 'pred']): root = Path(root) names = [x.name for x in sorted(root.glob('ir/*')) if x.suffix in ['.png', '.jpg', '.bmp']] save = [] for name in names: x = cv2.imread(str(root / 'ir' / name), cv2.IMREAD_GRAYSCALE) y = cv2.imread(str(root / 'vi' / name), cv2.IMREAD_GRAYSCALE) t = numpy.hstack([x, y]) cv2.imshow(name, t) if cv2.waitKey() == ord('s'): save.append(name) cv2.destroyWindow(name) meta = root / 'meta' meta.mkdir(parents=True, exist_ok=True) meta_f = meta / f'{mode}.txt' meta_f.write_text(reduce(lambda i, j: i + j, [t + '\n' for t in save])) if __name__ 
== '__main__': choose_images('data/tno', mode='train') ================================================ FILE: tools/convert_to_png.py ================================================ import argparse import logging from pathlib import Path import cv2 from tqdm import tqdm def convert_to_png(src: str | Path, color: bool): img_list = [x for x in Path(src).rglob('*') if x.suffix in ['.bmp', '.jpg', '.tiff']] process = tqdm(sorted(img_list)) for o_path in process: n_path = o_path.with_suffix('.png') process.set_description(f'convert {o_path.name} to {n_path.name}') img = cv2.imread(str(o_path), cv2.IMREAD_COLOR if color else cv2.IMREAD_GRAYSCALE) cv2.imwrite(str(n_path), img) o_path.unlink() if __name__ == '__main__': logging.basicConfig(level='DEBUG') parser = argparse.ArgumentParser('convert to png') parser.add_argument('--src', help='folder need to be converted') parser.add_argument('--color', action='store_true', help='use color mode (recommend on for vis, off for ir)') config = parser.parse_args() convert_to_png(**vars(config)) ================================================ FILE: tools/data_preview.py ================================================ import argparse from pathlib import Path from typing import Optional import cv2 import torch from kornia import image_to_tensor, tensor_to_image from torch import Tensor from torchvision.utils import draw_bounding_boxes from tqdm import tqdm import loader from loader.utils.reader import label_read def data_preview(img_f: str | Path, lbl_f: str | Path, dst_f: str | Path, dataset: Optional[str] = None): # create dst dst_f = Path(dst_f) dst_f.mkdir(parents=True, exist_ok=True) # images list img_f, lbl_f = Path(img_f), Path(lbl_f) img_l = sorted([x.stem for x in img_f.glob('*.png')]) # dataset settings classes, palette = [], [] if dataset is not None: dataset = getattr(loader, dataset) classes = dataset.classes palette = dataset.palette t_l = tqdm(img_l) for stem in t_l: t_l.set_description(f'draw on {stem}') lbl = label_read(lbl_f / f'{stem}.txt') img = image_to_tensor(cv2.imread(str(img_f / f'{stem}.png'))) lbl[:, 1:] *= Tensor([img.shape[-1], img.shape[-2], img.shape[-1], img.shape[-2]]) boxes = [x[1:] for x in lbl] if dataset is not None: cls = [classes[int(x[0])] for x in lbl] colors = [palette[int(x[0])] for x in lbl] img = draw_bounding_boxes(img, torch.stack(boxes, dim=0), cls, colors, width=3) else: img = draw_bounding_boxes(img, torch.stack(boxes, dim=0), width=3) cv2.imwrite(str(dst_f / f'{stem}.png'), tensor_to_image(img)) if __name__ == '__main__': parser = argparse.ArgumentParser('data preview') parser.add_argument('--img', help='image folder') parser.add_argument('--lbl', help='label folder') parser.add_argument('--dst', help='mask output folder (we will create it if not exists)') parser.add_argument('--cls', required=False, help='dataset type (random if not specified)') config = parser.parse_args() data_preview(img_f=config.img, lbl_f=config.lbl, dst_f=config.dst, dataset=config.cls) ================================================ FILE: tools/dict_to_device.py ================================================ from typing import Dict from torch import Tensor from torch.types import Device def dict_to_device(d: Dict, device: Device) -> Dict | None: if d is None: return None for k, v in d.items(): if isinstance(v, Tensor): d[k] = d[k].to(device) return d ================================================ FILE: tools/environment_probe.py ================================================ import logging import sys import torch class 
EnvironmentProbe: """ Detects the configuration of the environment and returns devices status. """ def __init__(self): python_v = sys.version.split()[0] pytorch_v = torch.__version__ cuda_s = torch.cuda.is_available() device = torch.cuda.current_device() if cuda_s else 'cpu' device_n = torch.cuda.get_device_name(device) logging.info(f'python: {python_v} | pytorch: {pytorch_v} | gpu: {device_n if cuda_s else False}') self.device = device def memory_status(self): if not torch.cuda.is_available(): return {'current': 'unavailable', 'max': 'unavailable'} memory_a = torch.cuda.memory_allocated(self.device) / 1024 ** 3 memory_ma = torch.cuda.max_memory_allocated(self.device) / 1024 ** 3 logging.debug(f'memory: {memory_a:.2f}GB (history max: {memory_ma:.2f}GB)') return {'current': memory_a, 'max': memory_ma} if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG) probe = EnvironmentProbe() probe.memory_status() ================================================ FILE: tools/generate_mask.py ================================================ import argparse import logging from pipeline.saliency import Saliency def generate_mask(url: str, src: str, dst: str): saliency = Saliency(url=url) saliency.inference(src=src, dst=dst) if __name__ == '__main__': logging.basicConfig(level='INFO') default_url = 'https://github.com/JinyuanLiu-CV/TarDAL/releases/download/v1.0.0/u2netp.pth' parser = argparse.ArgumentParser('mask generator') parser.add_argument('--url', default=default_url, help='checkpoint url') parser.add_argument('--src', help='folder need to be detected') parser.add_argument('--dst', help='mask output folder (we will create it if not exists)') config = parser.parse_args() generate_mask(**vars(config)) ================================================ FILE: tools/scenario_reader.py ================================================ import json import logging from functools import reduce from pathlib import Path def scenario_counter(src: str | Path): # read scenario from json file src = Path(src) scenarios = json.load(open(src, 'r')) # output as tree format logging.debug(f'total scenarios: {len(scenarios)}') tot_t_frame, tot_v_frame = 0, 0 for scenario in scenarios: t_frame, v_frame = 0, 0 frame_buf = [] # count frame for scene in scenario['scene']: frame = 0 for fr in scene['range']: frame += fr['max'] - fr['min'] + 1 frame_buf.append(f' | -- {scene["name"]} (frame: {frame}, mode: {scene["mode"]})') if scene['mode'] == 'train': t_frame += frame else: v_frame += frame # output logging.debug(f'-- {scenario["name"]} (scenes: {len(scenario["scene"])}, train: {t_frame}, val: {v_frame})') _ = [logging.debug(x) for x in frame_buf] tot_t_frame += t_frame tot_v_frame += v_frame logging.debug(f'total train frame: {tot_t_frame}, total val frame: {tot_v_frame}') def generate_meta(root: str | Path): root = Path(root) # read scenario from json file t_frame, v_frame = [], [] scenarios = json.load((root / 'meta' / 'scenario.json').open('r')) # count frame for scenario in scenarios: for scene in scenario['scene']: for fr in scene['range']: frame = list(range(fr['min'], fr['max'] + 1)) if scene['mode'] == 'train': t_frame += frame else: v_frame += frame # sort by index t_frame.sort() v_frame.sort() # write to file (root / 'meta' / 'train.txt').write_text(reduce(lambda i, j: i + j, [f'{str(x).zfill(5)}.png\n' for x in t_frame])) (root / 'meta' / 'val.txt').write_text(reduce(lambda i, j: i + j, [f'{str(x).zfill(5)}.png\n' for x in v_frame])) # total frame logging.info(f'total train frame: {len(t_frame)}, total 
val frame: {len(v_frame)}') if __name__ == '__main__': scenario_counter('data/m3fd/meta/scenario.json') generate_meta('data/m3fd') ================================================ FILE: train.py ================================================ import argparse import logging from pathlib import Path import torch.backends.cudnn import yaml import scripts from config import from_dict if __name__ == '__main__': # args parser parser = argparse.ArgumentParser() parser.add_argument('--cfg', default='config/default.yaml', help='config file path') parser.add_argument('--auth', help='wandb auth api key') args = parser.parse_args() # init config config = yaml.safe_load(Path(args.cfg).open('r')) config = from_dict(config) # convert dict to object config = config # init logger log_f = '%(asctime)s | %(filename)s[line:%(lineno)d] | %(levelname)s | %(message)s' logging.basicConfig(level=config.debug.log, format=log_f) # init device & anomaly detector torch.backends.cudnn.benchmark = True torch.autograd.set_detect_anomaly(True) # choose train script logging.info(f'enter {config.strategy} train mode') match config.strategy: case 'fuse': train_p = getattr(scripts, 'TrainF') case 'detect': if config.loss.bridge.fuse != 0: logging.warning('overwrite fuse loss weight to 0') config.loss.bridge.fuse = 0 train_p = getattr(scripts, 'TrainFD') case 'fuse & detect': train_p = getattr(scripts, 'TrainFD') case _: raise ValueError(f'unknown strategy: {config.strategy}') # create script instance train = train_p(config, wandb_key=args.auth) train.run() ================================================ FILE: tutorial.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "source": [ "# TarDAL online tutorial | CVPR 2022\n", "\n", "This is the **official** TarDAL notebook, and is freely available for everyone.\n", "For more information please visit [GitHub Repository](https://github.com/JinyuanLiu-CV/TarDAL).\n", "Thank you!" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%% md\n" } } }, { "cell_type": "markdown", "source": [ "## Setup Environment\n", "\n", "Install requirements for TarDAL." 
], "metadata": { "collapsed": false, "pycharm": { "name": "#%% md\n" } } }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "!nvidia-smi # check GPU environment\n", "!git clone https://github.com/JinyuanLiu-CV/TarDAL.git # clone repository from GitHub\n", "\n", "# use python 3.10\n", "!wget https://github.com/korakot/kora/releases/download/v0.10/py310.sh\n", "!bash ./py310.sh -b -f -p /usr/local\n", "!python -m ipykernel install --name \"py310\" --user\n", "\n", "%cd TarDAL\n", "%pip install -r requirements.txt # install tardal requirements\n", "%pip install -r module/detect/requirements.txt # install yolov5 requirements" ] }, { "cell_type": "markdown", "source": [ "## Fuse or Eval\n", "\n", "### Load Image (List)\n", "\n", "infrared image(s):\n", "![infrared](assets/sample/s1/ir/M3FD_00471.png)\n", "\n", "visible image(s):\n", "![visible](assets/sample/s1/vi/M3FD_00471.png)\n", "\n", "### Init TarDAL Pipeline" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": null, "outputs": [], "source": [ "from scripts import InferF\n", "from config import from_dict\n", "import yaml\n", "from pathlib import Path\n", "from IPython import display\n", "\n", "# init config\n", "config = yaml.safe_load(Path('config/official/colab.yaml').open('r'))\n", "config = from_dict(config) # convert dict to object\n", "\n", "# init infer pipeline\n", "infer_p = InferF(config, save_dir='runs/sample/s1')\n", "\n", "# generate fusion sample\n", "infer_p.run()\n", "\n", "# display sample\n", "display.Image('runs/sample/s1/M3FD_00471.png')" ], "metadata": { "collapsed": false, "pycharm": { "is_executing": true } } }, { "cell_type": "markdown", "source": [], "metadata": { "collapsed": false } } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", "version": "2.7.6" } }, "nbformat": 4, "nbformat_minor": 0 }