Repository: enoche/MMRec Branch: master Commit: 61098b5aba68 Files: 83 Total size: 495.4 KB Directory structure: gitextract_7sv64gl1/ ├── .gitignore ├── .idea/ │ ├── .gitignore │ ├── MMRec.iml │ ├── deployment.xml │ ├── inspectionProfiles/ │ │ ├── Project_Default.xml │ │ └── profiles_settings.xml │ ├── misc.xml │ ├── modules.xml │ └── vcs.xml ├── LICENSE ├── README.md ├── data/ │ └── README.md ├── evaluation/ │ └── README.md ├── preprocessing/ │ ├── 0rating2inter.ipynb │ ├── 1splitting.ipynb │ ├── 2reindex-feat.ipynb │ ├── 3feat-encoder.ipynb │ ├── README.md │ └── dualgnn-gen-u-u-matrix.py ├── requirements.txt └── src/ ├── common/ │ ├── abstract_recommender.py │ ├── encoders.py │ ├── init.py │ ├── loss.py │ └── trainer.py ├── configs/ │ ├── dataset/ │ │ ├── baby.yaml │ │ ├── clothing.yaml │ │ ├── elec.yaml │ │ ├── microlens.yaml │ │ └── sports.yaml │ ├── mg.yaml │ ├── model/ │ │ ├── BM3.yaml │ │ ├── BPR.yaml │ │ ├── DAMRS.yaml │ │ ├── DRAGON.yaml │ │ ├── DualGNN.yaml │ │ ├── FREEDOM.yaml │ │ ├── GRCN.yaml │ │ ├── ItemKNNCBF.yaml │ │ ├── LATTICE.yaml │ │ ├── LGMRec.yaml │ │ ├── LayerGCN.yaml │ │ ├── LightGCN.yaml │ │ ├── MGCN.yaml │ │ ├── MMGCN.yaml │ │ ├── MVGAE.yaml │ │ ├── PGL.yaml │ │ ├── SELFCFED_LGN.yaml │ │ ├── SLMRec.yaml │ │ ├── SMORE.yaml │ │ └── VBPR.yaml │ └── overall.yaml ├── main.py ├── models/ │ ├── bm3.py │ ├── bpr.py │ ├── damrs.py │ ├── dragon.py │ ├── dualgnn.py │ ├── freedom.py │ ├── grcn.py │ ├── itemknncbf.py │ ├── lattice.py │ ├── layergcn.py │ ├── lgmrec.py │ ├── lightgcn.py │ ├── mgcn.py │ ├── mmgcn.py │ ├── mvgae.py │ ├── pgl.py │ ├── selfcfed_lgn.py │ ├── slmrec.py │ ├── smore.py │ └── vbpr.py └── utils/ ├── configurator.py ├── data_utils.py ├── dataloader.py ├── dataset.py ├── logger.py ├── metrics.py ├── misc.py ├── quick_start.py ├── topk_evaluator.py └── utils.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class /data/baby/ # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ pip-wheel-metadata/ share/python-wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover *.py,cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 db.sqlite3-journal # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # IPython profile_default/ ipython_config.py # pyenv .python-version # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow __pypackages__/ # Celery stuff celerybeat-schedule celerybeat.pid # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ .dmypy.json dmypy.json # Pyre type checker .pyre/ ================================================ FILE: .idea/.gitignore ================================================ # Default ignored files /shelf/ /workspace.xml # Editor-based HTTP Client requests /httpRequests/ # Datasource local storage ignored files /dataSources/ /dataSources.local.xml ================================================ FILE: .idea/MMRec.iml ================================================ ================================================ FILE: .idea/deployment.xml ================================================ ================================================ FILE: .idea/inspectionProfiles/Project_Default.xml ================================================ ================================================ FILE: .idea/inspectionProfiles/profiles_settings.xml ================================================ ================================================ FILE: .idea/misc.xml ================================================ ================================================ FILE: .idea/modules.xml ================================================ ================================================ FILE: .idea/vcs.xml ================================================ ================================================ FILE: LICENSE ================================================ GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. 
Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. 
Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. 
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. 
"Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. 
Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. 
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . ================================================ FILE: README.md ================================================ # MMRec
$\text{MMRec}$: A modern MultiModal Recommendation toolbox that simplifies your research ([arXiv](https://arxiv.org/abs/2302.03497)).

:point_right: Check our [comprehensive survey on MMRec, arXiv](https://arxiv.org/abs/2302.04473).

:point_right: Check the awesome [multimodal recommendation resources](https://github.com/enoche/MultimodalRecSys).

## Toolbox
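A minimal way to train one model on one dataset is sketched below. The `quick_start` signature and the `-m`/`-d` CLI flags are assumptions inferred from the file layout (`src/main.py`, `src/utils/quick_start.py`), since those files are not shown here; per-model and per-dataset settings live under `src/configs/model/` and `src/configs/dataset/`, with shared options in `src/configs/overall.yaml`.

```python
# Sketch, run from the src/ directory.
# Assumed CLI equivalent: python main.py -m BM3 -d baby
from utils.quick_start import quick_start

# Assumed signature: quick_start(model, dataset, config_dict, save_model)
quick_start(model='BM3', dataset='baby', config_dict={}, save_model=True)
```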

## Supported Models

Source code is under `src/models`.

| **Model** | **Paper** | **Conference/Journal** | **Code** |
|-----------|-----------|------------------------|----------|
| **General models** | | | |
| SelfCF | [SelfCF: A Simple Framework for Self-supervised Collaborative Filtering](https://arxiv.org/abs/2107.03019) | ACM TORS'23 | selfcfed_lgn.py |
| LayerGCN | [Layer-refined Graph Convolutional Networks for Recommendation](https://arxiv.org/abs/2207.11088) | ICDE'23 | layergcn.py |
| **Multimodal models** | | | |
| VBPR | [VBPR: Visual Bayesian Personalized Ranking from Implicit Feedback](https://arxiv.org/abs/1510.01784) | AAAI'16 | vbpr.py |
| MMGCN | [MMGCN: Multi-modal Graph Convolution Network for Personalized Recommendation of Micro-video](https://staff.ustc.edu.cn/~hexn/papers/mm19-MMGCN.pdf) | MM'19 | mmgcn.py |
| ItemKNNCBF | [Are We Really Making Much Progress? A Worrying Analysis of Recent Neural Recommendation Approaches](https://arxiv.org/abs/1907.06902) | RecSys'19 | itemknncbf.py |
| GRCN | [Graph-Refined Convolutional Network for Multimedia Recommendation with Implicit Feedback](https://arxiv.org/abs/2111.02036) | MM'20 | grcn.py |
| MVGAE | [Multi-Modal Variational Graph Auto-Encoder for Recommendation Systems](https://ieeexplore.ieee.org/abstract/document/9535249) | TMM'21 | mvgae.py |
| DualGNN | [DualGNN: Dual Graph Neural Network for Multimedia Recommendation](https://ieeexplore.ieee.org/abstract/document/9662655) | TMM'21 | dualgnn.py |
| LATTICE | [Mining Latent Structures for Multimedia Recommendation](https://arxiv.org/abs/2104.09036) | MM'21 | lattice.py |
| SLMRec | [Self-supervised Learning for Multimedia Recommendation](https://ieeexplore.ieee.org/document/9811387) | TMM'22 | slmrec.py |
| **Newly added** | | | |
| BM3 | [Bootstrap Latent Representations for Multi-modal Recommendation](https://dl.acm.org/doi/10.1145/3543507.3583251) | WWW'23 | bm3.py |
| FREEDOM | [A Tale of Two Graphs: Freezing and Denoising Graph Structures for Multimodal Recommendation](https://arxiv.org/abs/2211.06924) | MM'23 | freedom.py |
| MGCN | [Multi-View Graph Convolutional Network for Multimedia Recommendation](https://arxiv.org/abs/2308.03588) | MM'23 | mgcn.py |
| DRAGON | [Enhancing Dyadic Relations with Homogeneous Graphs for Multimodal Recommendation](https://arxiv.org/abs/2301.12097) | ECAI'23 | dragon.py |
| MG | [Mirror Gradient: Towards Robust Multimodal Recommender Systems via Exploring Flat Local Minima](https://arxiv.org/abs/2402.11262) | WWW'24 | common/trainer.py |
| LGMRec | [LGMRec: Local and Global Graph Learning for Multimodal Recommendation](https://arxiv.org/abs/2312.16400) | AAAI'24 | lgmrec.py |
| DA-MRS | [Improving Multi-modal Recommender Systems by Denoising and Aligning Multi-modal Content and User Feedback](https://dl.acm.org/doi/10.1145/3637528.3671703) | KDD'24 | damrs.py |
| SMORE | [Spectrum-based Modality Representation Fusion Graph Convolutional Network for Multimodal Recommendation](https://arxiv.org/abs/2412.14978) | WSDM'25 | smore.py |
| PGL | [Mind Individual Information! Principal Graph Learning for Multimedia Recommendation](https://ojs.aaai.org/index.php/AAAI/article/view/33429) | AAAI'25 | pgl.py |
#### Please consider citing our paper if this framework helps you, thanks:

```
@inproceedings{zhou2023bootstrap,
  author = {Zhou, Xin and Zhou, Hongyu and Liu, Yong and Zeng, Zhiwei and Miao, Chunyan and Wang, Pengwei and You, Yuan and Jiang, Feijun},
  title = {Bootstrap Latent Representations for Multi-Modal Recommendation},
  booktitle = {Proceedings of the ACM Web Conference 2023},
  pages = {845–854},
  year = {2023}
}

@article{zhou2023comprehensive,
  title={A Comprehensive Survey on Multimodal Recommender Systems: Taxonomy, Evaluation, and Future Directions},
  author={Hongyu Zhou and Xin Zhou and Zhiwei Zeng and Lingzi Zhang and Zhiqi Shen},
  year={2023},
  journal={arXiv preprint arXiv:2302.04473},
}

@inproceedings{zhou2023mmrec,
  title={Mmrec: Simplifying multimodal recommendation},
  author={Zhou, Xin},
  booktitle={Proceedings of the 5th ACM International Conference on Multimedia in Asia Workshops},
  pages={1--2},
  year={2023}
}
```

================================================
FILE: data/README.md
================================================

## Data

Download from Google Drive: [Baby/Sports/Elec](https://drive.google.com/drive/folders/13cBy1EA_saTUuXxVllKgtfci2A09jyaG?usp=sharing)

The data already contain text and image features extracted with Sentence-Transformers and a CNN.

An alternative dataset for short-video recommendation: [MicroLens](https://drive.google.com/drive/folders/14UyTAh_YyDV8vzXteBJiy9jv8TBDK43w?usp=drive_link). Thanks to @yxni98!

* Please move your downloaded data into this directory for model training.
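For orientation, a sketch of inspecting the downloaded files for one dataset is below. The exact file names (`baby.inter`, `image_feat.npy`, `text_feat.npy`) are assumptions based on the dataset configs under `src/configs/dataset/`, which are not shown here:

```python
# Sketch: inspect the downloaded multimodal features (file names are assumed).
import numpy as np
import pandas as pd

root = 'data/baby'
v_feat = np.load(f'{root}/image_feat.npy')  # per-item CNN image features
t_feat = np.load(f'{root}/text_feat.npy')   # per-item sentence-transformer text features
inter = pd.read_csv(f'{root}/baby.inter', sep='\t')  # user-item interactions
print(v_feat.shape, t_feat.shape, inter.shape)
```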
================================================
FILE: evaluation/README.md
================================================

# EVALUATING THE SOTA MODELS

We validate the effectiveness and efficiency of state-of-the-art multimodal recommendation models through extensive experiments on four public datasets. We further investigate the principal determinants of model performance, including the impact of different modality information and of data split methods.

## Statistics of the evaluated datasets

| Datasets | # Users | # Items | # Interactions | Sparsity |
|----------|---------|---------|----------------|----------|
| Baby | 19,445 | 7,050 | 160,792 | 99.8827% |
| Sports | 35,598 | 18,357 | 296,337 | 99.9547% |
| FoodRec | 61,668 | 21,874 | 1,654,456 | 99.8774% |
| Elec | 192,403 | 63,001 | 1,689,188 | 99.9861% |

## Experimental Results

| Dataset | Model | Recall@10 | Recall@20 | Recall@50 | NDCG@10 | NDCG@20 | NDCG@50 |
|---------|-------|-----------|-----------|-----------|---------|---------|---------|
| **Baby** | BPR | 0.0357 | 0.0575 | 0.1054 | 0.0192 | 0.0249 | 0.0345 |
| | LightGCN | 0.0479 | 0.0754 | 0.1333 | 0.0257 | 0.0328 | 0.0445 |
| | VBPR | 0.0423 | 0.0663 | 0.1212 | 0.0223 | 0.0284 | 0.0396 |
| | MMGCN | 0.0378 | 0.0615 | 0.1100 | 0.0200 | 0.0261 | 0.0359 |
| | DualGNN | 0.0448 | 0.0716 | 0.1288 | 0.0240 | 0.0309 | 0.0424 |
| | GRCN | 0.0539 | 0.0833 | 0.1464 | 0.0288 | 0.0363 | 0.0490 |
| | LATTICE | 0.0547 | 0.0850 | 0.1477 | 0.0292 | 0.0370 | 0.0497 |
| | BM3 | 0.0564 | 0.0883 | 0.1477 | 0.0301 | 0.0383 | 0.0502 |
| | SLMRec | 0.0529 | 0.0775 | 0.1252 | 0.0290 | 0.0353 | 0.0450 |
| | ADDVAE | _0.0598_ | _0.091_ | _0.1508_ | _0.0323_ | _0.0404_ | _0.0525_ |
| | FREEDOM | **0.0627** | **0.0992** | **0.1655** | **0.0330** | **0.0424** | **0.0558** |
| **Sports** | BPR | 0.0432 | 0.0653 | 0.1083 | 0.0241 | 0.0298 | 0.0385 |
| | LightGCN | 0.0569 | 0.0864 | 0.1414 | 0.0311 | 0.0387 | 0.0498 |
| | VBPR | 0.0558 | 0.0856 | 0.1391 | 0.0307 | 0.0384 | 0.0492 |
| | MMGCN | 0.0370 | 0.0605 | 0.1078 | 0.0193 | 0.0254 | 0.0350 |
| | DualGNN | 0.0568 | 0.0859 | 0.1392 | 0.0310 | 0.0385 | 0.0493 |
| | GRCN | 0.0598 | 0.0915 | 0.1509 | 0.0332 | 0.0414 | 0.0535 |
| | LATTICE | 0.0620 | 0.0953 | 0.1561 | 0.0335 | 0.0421 | 0.0544 |
| | BM3 | 0.0656 | 0.0980 | 0.1581 | 0.0355 | 0.0438 | 0.0561 |
| | SLMRec | 0.0663 | 0.0990 | 0.1543 | 0.0365 | 0.0450 | 0.0562 |
| | ADDVAE | _0.0709_ | _0.1035_ | _0.1663_ | _0.0389_ | _0.0473_ | _0.0600_ |
| | FREEDOM | **0.0717** | **0.1089** | **0.1768** | **0.0385** | **0.0481** | **0.0618** |
| **FoodRec** | BPR | 0.0303 | 0.0511 | 0.0948 | 0.0188 | 0.0250 | 0.0356 |
| | LightGCN | 0.0331 | 0.0546 | 0.1003 | 0.0210 | 0.0274 | 0.0386 |
| | VBPR | 0.0306 | 0.0516 | 0.0972 | 0.0191 | 0.0254 | 0.0365 |
| | MMGCN | 0.0307 | 0.0510 | 0.0943 | 0.0192 | 0.0253 | 0.0359 |
| | DualGNN | _0.0338_ | 0.0559 | _0.1027_ | _0.0214_ | _0.0280_ | _0.0394_ |
| | GRCN | **0.0356** | **0.0578** | **0.1063** | **0.0226** | **0.0295** | **0.0411** |
| | LATTICE | 0.0336 | _0.0560_ | 0.1012 | 0.0211 | 0.0277 | 0.0388 |
| | BM3 | 0.0334 | 0.0553 | 0.0994 | 0.0208 | 0.0274 | 0.0381 |
| | SLMRec | 0.0323 | 0.0515 | 0.0907 | 0.0208 | 0.0266 | 0.0362 |
| | ADDVAE | 0.0309 | 0.0508 | 0.093 | 0.0186 | 0.0247 | 0.035 |
| | FREEDOM | 0.0333 | 0.0556 | 0.1009 | 0.0212 | 0.0279 | 0.0389 |
| **Elec** | BPR | 0.0235 | 0.0367 | 0.0621 | 0.0127 | 0.0161 | 0.0212 |
| | LightGCN | 0.0363 | 0.0540 | 0.0879 | 0.0204 | 0.0250 | 0.0318 |
| | VBPR | 0.0293 | 0.0458 | 0.0778 | 0.0159 | 0.0202 | 0.0267 |
| | MMGCN | 0.0213 | 0.0343 | 0.0610 | 0.0112 | 0.0146 | 0.0200 |
| | DualGNN | 0.0365 | 0.0542 | 0.0875 | 0.0206 | 0.0252 | 0.0319 |
| | GRCN | 0.0389 | 0.0590 | 0.0970 | 0.0216 | 0.0268 | 0.0345 |
| | LATTICE | - | - | - | - | - | - |
| | BM3 | 0.0437 | 0.0648 | 0.1021 | 0.0247 | 0.0302 | 0.0378 |
| | SLMRec | _0.0443_ | _0.0651_ | _0.1038_ | _0.0249_ | _0.0303_ | _0.0382_ |
| | ADDVAE | **0.0451** | **0.0665** | **0.1066** | **0.0253** | **0.0308** | **0.0390** |
| | FREEDOM | 0.0396 | 0.0601 | 0.0998 | 0.0220 | 0.0273 | 0.0353 |
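For reference, Recall@k and NDCG@k with binary relevance can be computed per user as in the sketch below; the helper names are illustrative, not MMRec's evaluator API (`src/utils/metrics.py` is not shown here):

```python
# Sketch: top-k Recall and NDCG for one user, as reported in the tables above.
import numpy as np

def recall_at_k(hits, num_pos, k):
    # Fraction of the user's relevant items retrieved in the top-k.
    return hits[:k].sum() / num_pos

def ndcg_at_k(hits, num_pos, k):
    # DCG with binary relevance, normalized by the ideal DCG.
    dcg = (hits[:k] / np.log2(np.arange(2, k + 2))).sum()
    idcg = (1.0 / np.log2(np.arange(2, min(num_pos, k) + 2))).sum()
    return dcg / idcg

hits = np.array([1, 0, 1, 0, 0])  # hit flags for a top-5 recommendation list
print(recall_at_k(hits, num_pos=3, k=5), ndcg_at_k(hits, num_pos=3, k=5))
```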
### Ablation Study

#### Recommendation performance comparison using different data split methods

We evaluate the performance of various recommendation models under different data splitting methods. Offline evaluation is based on historical item ratings or implicit item feedback; because it relies on user-item interactions and the models all learn from supervised signals, the interactions must be split into train, validation, and test sets. We compare three main split strategies (a code sketch of all three follows below):

- **Random split**: As the name suggests, this strategy randomly selects the train/test boundary for each user and splits that user's interactions according to the given ratio. Its disadvantage is that results cannot be reproduced unless the authors publish exactly how the data were split; it is also not a realistic scenario, since it ignores time.
- **User time split**: This temporal strategy splits each user's historical interactions by timestamp according to the ratio (e.g., train:validation:test = 8:1:1), holding out the last fraction of each user's interactions as the test set. Although it respects timestamps, it is still not fully realistic, because the split is made within each user's own history without considering global time.
- **Global time split**: This strategy fixes a time point shared by all users according to the splitting ratio; interactions after that point form the test set. Additionally, users whose interactions fall after the global temporal boundary must already appear in the training set, which makes this the most realistic and strict setting. Its limitation is that the number of users shrinks, because users absent from the training set are deleted.

Our experiments on the Sports dataset, using these three splitting strategies, provide insights into their impact on recommendation performance. The first table below presents the performance comparison in terms of Recall@k and NDCG@k for k = 10 and 20, and the second shows the performance ranking of models based on Recall@20 and NDCG@20.
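Before the result tables, here is a minimal sketch of the three strategies, assuming a pandas DataFrame of interactions with `userID`, `itemID`, and `timestamp` columns (helper names are illustrative, not part of MMRec):

```python
# Sketch: three train/valid/test split strategies with ratios 8:1:1.
import pandas as pd

def _cut_per_user(df, ratios):
    # Positional cut within each user's interactions, in current row order.
    rank = df.groupby('userID').cumcount()
    size = df.groupby('userID')['itemID'].transform('size')
    frac = rank / size
    train = df[frac < ratios[0]]
    valid = df[(frac >= ratios[0]) & (frac < ratios[0] + ratios[1])]
    test = df[frac >= ratios[0] + ratios[1]]
    return train, valid, test

def random_split(df, ratios=(0.8, 0.1, 0.1), seed=42):
    # Shuffle each user's interactions, then cut by ratio.
    return _cut_per_user(df.sample(frac=1.0, random_state=seed), ratios)

def user_time_split(df, ratios=(0.8, 0.1, 0.1)):
    # Order by each user's own timestamps, then cut by ratio.
    return _cut_per_user(df.sort_values('timestamp'), ratios)

def global_time_split(df, ratios=(0.8, 0.1, 0.1)):
    # Fix global time boundaries shared by all users.
    t1, t2 = df['timestamp'].quantile([ratios[0], ratios[0] + ratios[1]])
    train = df[df['timestamp'] <= t1]
    valid = df[(df['timestamp'] > t1) & (df['timestamp'] <= t2)]
    test = df[df['timestamp'] > t2]
    # Users unseen in training are dropped from valid/test.
    seen = set(train['userID'])
    return train, valid[valid['userID'].isin(seen)], test[test['userID'].isin(seen)]
```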
| Dataset | Model | | Recall@10 | | | Recall@20 | |
|---------|----------|----------|-----------|-------------|----------|-----------|-------------|
| | | Random | User Time | Global Time | Random | User Time | Global Time |
| | MMGCN | 0.0384 | 0.0266 | 0.0140 | 0.0611 | 0.0446 | 0.0245 |
| | BPR | 0.0444 | 0.0322 | 0.0152 | 0.0663 | 0.0509 | 0.0258 |
| | VBPR | 0.0563 | 0.0385 | 0.0176 | 0.0851 | 0.0620 | 0.0298 |
| | DualGNN | 0.0576 | 0.0403 | 0.0181 | 0.0859 | 0.0611 | 0.0297 |
| sports | GRCN | 0.0604 | 0.0418 | 0.0167 | 0.0915 | 0.0666 | 0.0286 |
| | LightGCN | 0.0568 | 0.0405 | 0.0205 | 0.0863 | 0.0663 | 0.0336 |
| | LATTICE | 0.0641 | 0.0450 | 0.0207 | 0.0964 | 0.0699 | 0.0337 |
| | BM3 | 0.0646 | 0.0447 | 0.0213 | 0.0955 | 0.0724 | 0.0336 |
| | SLMRec | 0.0651 | 0.0470 | 0.0220 | 0.0985 | 0.0733 | 0.0350 |
| | FREEDOM | 0.0708 | 0.0490 | 0.0226 | 0.1080 | 0.0782 | 0.0372 |

| Dataset | Model | | NDCG@10 | | | NDCG@20 | |
|---------|----------|----------|-----------|-------------|----------|-----------|-------------|
| | | Random | User Time | Global Time | Random | User Time | Global Time |
| | MMGCN | 0.0202 | 0.0134 | 0.0091 | 0.0261 | 0.0180 | 0.0125 |
| | BPR | 0.0245 | 0.0169 | 0.0102 | 0.0302 | 0.0218 | 0.0135 |
| | VBPR | 0.0304 | 0.0204 | 0.0115 | 0.0378 | 0.0265 | 0.0153 |
| | DualGNN | 0.0321 | 0.0214 | 0.0118 | 0.0394 | 0.0268 | 0.0155 |
| sports | GRCN | 0.0332 | 0.0219 | 0.0101 | 0.0412 | 0.0282 | 0.0138 |
| | LightGCN | 0.0315 | 0.0220 | 0.0139 | 0.0391 | 0.0286 | 0.0180 |
| | LATTICE | 0.0351 | 0.0238 | 0.0138 | 0.0434 | 0.0302 | 0.0177 |
| | BM3 | 0.0356 | 0.0237 | 0.0144 | 0.0436 | 0.0308 | 0.0182 |
| | SLMRec | 0.0364 | 0.0253 | 0.0148 | 0.0450 | 0.0321 | 0.0189 |
| | FREEDOM | 0.0388 | 0.0255 | 0.0151 | 0.0485 | 0.0330 | 0.0197 |

As demonstrated above, different data splitting strategies lead to varied performance outcomes for the same dataset and evaluation metrics. This variability makes it difficult to compare the effectiveness of models evaluated under different data split strategies.

| Model | | Sports, NDCG@20 | |
|----------|--------|-------------------|-------------|
| | Random | User Time | Global Time |
| MMGCN | 10 | 10 | 10 |
| BPR | 9 | 9 | 8↑1 |
| VBPR | 8 | 8 | 7↑1 |
| LightGCN | 7 | 5↑2 | 4↑3 |
| DualGNN | 6 | 7↓1 | 6 |
| GRCN | 5 | 6↓1 | 9↓4 |
| LATTICE | 4 | 4 | 5↓1 |
| BM3 | 3 | 3 | 3 |
| SLMRec | 2 | 2 | 2 |
| FREEDOM | 1 | 1 | 1 |

| **Model** | | **Sports, Recall@20** | |
|----------|--------|-------------------|-------------|
| | Random | User Time | Global Time |
| MMGCN | 10 | 10 | 10 |
| BPR | 9 | 9 | 9 |
| VBPR | 8 | 7↑1 | 6↑2 |
| DualGNN | 7 | 8↓1 | 7 |
| LightGCN | 6 | 6 | 5↑1 |
| GRCN | 5 | 5 | 8↓3 |
| BM3 | 4 | 3↑1 | 4 |
| LATTICE | 3 | 4↓1 | 3 |
| SLMRec | 2 | 2 | 2 |
| FREEDOM | 1 | 1 | 1 |

The tables above report the ranks of the SOTA models under each splitting strategy. Rows are sorted by model performance under random splitting, with up and down arrows indicating rank swaps relative to random splitting. As shown, ranking swaps occur between models under different splitting strategies.

#### Recommendation performance comparison using different modalities

We are interested in how modality information benefits recommendation and which modality contributes more. We aim to understand the specific benefits of different modalities in recommender systems and to provide guidelines for researchers on selecting appropriate modalities. We evaluate this by feeding the model a single modality and comparing the performance of using both modalities against using a single one.
The following figure, based on Recall@20, visually summarizes the impact of different modalities on the various models. The orange points represent multimodal performance, the green points textual-modality performance, and the blue points visual-modality performance. The specific numerical values are available in our GitHub repository.

(Figures omitted: per-model Recall@20 under multimodal, textual-only, and visual-only inputs.)

## Please consider citing our paper if these results help you, thanks:

================================================
FILE: preprocessing/0rating2inter.ipynb
================================================

{ "cells": [ { "cell_type": "markdown", "source": [ "# Extract U-I interactions from ratings_Sports_and_Outdoors.csv, apply 5-core filtering, then re-index\n", "- dataset located at: http://jmcauley.ucsd.edu/data/amazon/links.html; use the rating-only file in the \"Small\" subsets for experimentation" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import os, csv\n", "import pandas as pd" ] }, { "cell_type": "code", "execution_count": null, "outputs": [], "source": [ "os.chdir('/home/enoche/MMRec/Sports14')\n", "os.getcwd()" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n", "is_executing": true } } }, { "cell_type": "markdown", "source": [ "## 5-core filtering" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": 3, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "shape: (3268695, 4)\n" ] }, { "data": { "text/plain": " userID itemID rating timestamp\n0 A3PMSRCL80KSA1 0000031852 4.0 1388275200\n1 A1SNLWGLFXD70K 0000031852 4.0 1392940800\n2 A1KJ4CVG87QW09 0000031852 4.0 1389657600\n3 AA9ITO6ZLZW6 0000031852 5.0 1399507200\n4 APJ5ULJ1RMZ4 0000031852 1.0 1398556800", "text/html": "
" }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df = pd.read_csv('ratings_Sports_and_Outdoors.csv', names=['userID', 'itemID', 'rating', 'timestamp'], header=None)\n", "print(f'shape: {df.shape}')\n", "df[:5]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 4, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "After dropped: (3268695, 4)\n" ] }, { "data": { "text/plain": " userID itemID rating timestamp\n0 A3PMSRCL80KSA1 0000031852 4.0 1388275200\n1 A1SNLWGLFXD70K 0000031852 4.0 1392940800\n2 A1KJ4CVG87QW09 0000031852 4.0 1389657600", "text/html": "
" }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "k_core = 5\n", "learner_id, course_id, tmstmp_str = 'userID', 'itemID', 'timestamp'\n", "\n", "df.dropna(subset=[learner_id, course_id, tmstmp_str], inplace=True)\n", "df.drop_duplicates(subset=[learner_id, course_id, tmstmp_str], inplace=True)\n", "print(f'After dropped: {df.shape}')\n", "df[:3]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 5, "outputs": [], "source": [ "from collections import Counter\n", "import numpy as np\n", "\n", "min_u_num, min_i_num = 5, 5\n", "\n", "def get_illegal_ids_by_inter_num(df, field, max_num=None, min_num=None):\n", " if field is None:\n", " return set()\n", " if max_num is None and min_num is None:\n", " return set()\n", "\n", " max_num = max_num or np.inf\n", " min_num = min_num or -1\n", "\n", " ids = df[field].values\n", " inter_num = Counter(ids)\n", " ids = {id_ for id_ in inter_num if inter_num[id_] < min_num or inter_num[id_] > max_num}\n", " print(f'{len(ids)} illegal_ids_by_inter_num, field={field}')\n", "\n", " return ids\n", "\n", "\n", "def filter_by_k_core(df):\n", " while True:\n", " ban_users = get_illegal_ids_by_inter_num(df, field=learner_id, max_num=None, min_num=min_u_num)\n", " ban_items = get_illegal_ids_by_inter_num(df, field=course_id, max_num=None, min_num=min_i_num)\n", " if len(ban_users) == 0 and len(ban_items) == 0:\n", " return\n", "\n", " dropped_inter = pd.Series(False, index=df.index)\n", " if learner_id:\n", " dropped_inter |= df[learner_id].isin(ban_users)\n", " if course_id:\n", " dropped_inter |= df[course_id].isin(ban_items)\n", " print(f'{len(dropped_inter)} dropped interactions')\n", " df.drop(df.index[dropped_inter], inplace=True)\n", "\n" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "markdown", "source": [ "## k-core" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": 6, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1906153 illegal_ids_by_inter_num, field=userID\n", "376127 illegal_ids_by_inter_num, field=itemID\n", "3268695 dropped interactions\n", "22213 illegal_ids_by_inter_num, field=userID\n", "54919 illegal_ids_by_inter_num, field=itemID\n", "589029 dropped interactions\n", "18323 illegal_ids_by_inter_num, field=userID\n", "3743 illegal_ids_by_inter_num, field=itemID\n", "422478 dropped interactions\n", "2298 illegal_ids_by_inter_num, field=userID\n", "4388 illegal_ids_by_inter_num, field=itemID\n", "349749 dropped interactions\n", "3331 illegal_ids_by_inter_num, field=userID\n", "639 illegal_ids_by_inter_num, field=itemID\n", "326238 dropped interactions\n", "579 illegal_ids_by_inter_num, field=userID\n", "1012 illegal_ids_by_inter_num, field=itemID\n", "311188 dropped interactions\n", "897 illegal_ids_by_inter_num, field=userID\n", "169 illegal_ids_by_inter_num, field=itemID\n", "305054 dropped interactions\n", "155 illegal_ids_by_inter_num, field=userID\n", "308 illegal_ids_by_inter_num, field=itemID\n", "300866 dropped interactions\n", "301 illegal_ids_by_inter_num, field=userID\n", "47 illegal_ids_by_inter_num, field=itemID\n", "299031 dropped interactions\n", "50 illegal_ids_by_inter_num, field=userID\n", "79 illegal_ids_by_inter_num, field=itemID\n", "297646 dropped interactions\n", "87 illegal_ids_by_inter_num, field=userID\n", "11 illegal_ids_by_inter_num, field=itemID\n", "297132 dropped interactions\n", "16 illegal_ids_by_inter_num, 
field=userID\n", "24 illegal_ids_by_inter_num, field=itemID\n", "296741 dropped interactions\n", "24 illegal_ids_by_inter_num, field=userID\n", "1 illegal_ids_by_inter_num, field=itemID\n", "296581 dropped interactions\n", "1 illegal_ids_by_inter_num, field=userID\n", "8 illegal_ids_by_inter_num, field=itemID\n", "296481 dropped interactions\n", "8 illegal_ids_by_inter_num, field=userID\n", "0 illegal_ids_by_inter_num, field=itemID\n", "296445 dropped interactions\n", "0 illegal_ids_by_inter_num, field=userID\n", "5 illegal_ids_by_inter_num, field=itemID\n", "296413 dropped interactions\n", "5 illegal_ids_by_inter_num, field=userID\n", "0 illegal_ids_by_inter_num, field=itemID\n", "296393 dropped interactions\n", "0 illegal_ids_by_inter_num, field=userID\n", "3 illegal_ids_by_inter_num, field=itemID\n", "296373 dropped interactions\n", "4 illegal_ids_by_inter_num, field=userID\n", "0 illegal_ids_by_inter_num, field=itemID\n", "296361 dropped interactions\n", "0 illegal_ids_by_inter_num, field=userID\n", "1 illegal_ids_by_inter_num, field=itemID\n", "296345 dropped interactions\n", "1 illegal_ids_by_inter_num, field=userID\n", "0 illegal_ids_by_inter_num, field=itemID\n", "296341 dropped interactions\n", "0 illegal_ids_by_inter_num, field=userID\n", "0 illegal_ids_by_inter_num, field=itemID\n", "k-core shape: (296337, 4)\n", "shape after k-core: (296337, 4)\n" ] }, { "data": { "text/plain": " userID itemID rating timestamp\n564 AIXZKN4ACSKI 1881509818 5.0 1390694400\n565 A1L5P841VIO02V 1881509818 5.0 1328140800", "text/html": "
" }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "filter_by_k_core(df)\n", "print(f'k-core shape: {df.shape}')\n", "print(f'shape after k-core: {df.shape}')\n", "df[:2]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "markdown", "source": [ "## Re-index" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": 7, "outputs": [], "source": [ "df.reset_index(drop=True, inplace=True)" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 8, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "mapping dumped...\n" ] } ], "source": [ "\n", "i_mapping_file = 'i_id_mapping.csv'\n", "u_mapping_file = 'u_id_mapping.csv'\n", "\n", "splitting = [0.8, 0.1, 0.1]\n", "uid_field, iid_field = learner_id, course_id\n", "\n", "uni_users = pd.unique(df[uid_field])\n", "uni_items = pd.unique(df[iid_field])\n", "\n", "# start from 0\n", "u_id_map = {k: i for i, k in enumerate(uni_users)}\n", "i_id_map = {k: i for i, k in enumerate(uni_items)}\n", "\n", "df[uid_field] = df[uid_field].map(u_id_map)\n", "df[iid_field] = df[iid_field].map(i_id_map)\n", "df[uid_field] = df[uid_field].astype(int)\n", "df[iid_field] = df[iid_field].astype(int)\n", "\n", "# dump\n", "rslt_dir = './'\n", "u_df = pd.DataFrame(list(u_id_map.items()), columns=['user_id', 'userID'])\n", "i_df = pd.DataFrame(list(i_id_map.items()), columns=['asin', 'itemID'])\n", "\n", "u_df.to_csv(os.path.join(rslt_dir, u_mapping_file), sep='\\t', index=False)\n", "i_df.to_csv(os.path.join(rslt_dir, i_mapping_file), sep='\\t', index=False)\n", "print(f'mapping dumped...')" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": null, "outputs": [], "source": [ "\n", "# =========2. splitting\n", "print(f'splitting ...')\n", "tot_ratio = sum(splitting)\n", "# remove 0.0 in ratios\n", "ratios = [i for i in splitting if i > .0]\n", "ratios = [_ / tot_ratio for _ in ratios]\n", "split_ratios = np.cumsum(ratios)[:-1]\n", "\n", "#df[tmstmp_str] = df[tmstmp_str].map(lambda x: datetime.strptime(x, \"%Y-%m-%dT%H:%M:%SZ\"))\n", "split_ratios" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n", "is_executing": true } } }, { "cell_type": "code", "execution_count": 10, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "columns: Index(['userID', 'itemID', 'rating', 'timestamp', 'x_label'], dtype='object')\n" ] }, { "data": { "text/plain": " userID itemID rating timestamp x_label\n1 1 0 5.0 1328140800 0\n2 2 0 4.0 1330387200 0\n3 3 0 4.0 1328400000 0\n4 4 0 4.0 1366675200 0\n5 5 0 5.0 1351814400 0", "text/html": "
" }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ts_id = 'timestamp'\n", "\n", "split_timestamps = list(np.quantile(df[ts_id], split_ratios))\n", "# get df training dataset unique users/items\n", "df_train = df.loc[df[ts_id] < split_timestamps[0]].copy()\n", "df_val = df.loc[(split_timestamps[0] <= df[ts_id]) & (df[ts_id] < split_timestamps[1])].copy()\n", "df_test = df.loc[(split_timestamps[1] <= df[ts_id])].copy()\n", "\n", "x_label, rslt_file = 'x_label', 'sports14-indexed.inter'\n", "df_train[x_label] = 0\n", "df_val[x_label] = 1\n", "df_test[x_label] = 2\n", "temp_df = pd.concat([df_train, df_val, df_test])\n", "temp_df = temp_df[[learner_id, course_id, 'rating', ts_id, x_label]]\n", "print(f'columns: {temp_df.columns}')\n", "\n", "temp_df.columns = [learner_id, course_id, 'rating', ts_id, x_label]\n", "\n", "temp_df.to_csv(os.path.join(rslt_dir, rslt_file), sep='\\t', index=False)\n", "temp_df[:5]\n", "#print('done!')" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "markdown", "source": [], "metadata": { "collapsed": false } }, { "cell_type": "markdown", "source": [], "metadata": { "collapsed": false } }, { "cell_type": "markdown", "source": [ "## Reload" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%% md\n" } } }, { "cell_type": "code", "execution_count": 11, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "shape: (296337, 5)\n" ] }, { "data": { "text/plain": " userID itemID rating timestamp x_label\n0 1 0 5.0 1328140800 0\n1 2 0 4.0 1330387200 0\n2 3 0 4.0 1328400000 0\n3 4 0 4.0 1366675200 0", "text/html": "
" }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "indexed_df = pd.read_csv(rslt_file, sep='\\t')\n", "print(f'shape: {indexed_df.shape}')\n", "indexed_df[:4]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 12, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "# of unique learners: 35598\n", "# of unique courses: 18357\n", "min/max of unique learners: 0/35597\n", "min/max of unique courses: 0/18356\n" ] } ], "source": [ "u_uni = indexed_df[learner_id].unique()\n", "c_uni = indexed_df[course_id].unique()\n", "\n", "print(f'# of unique learners: {len(u_uni)}')\n", "print(f'# of unique courses: {len(c_uni)}')\n", "\n", "print('min/max of unique learners: {0}/{1}'.format(min(u_uni), max(u_uni)))\n", "print('min/max of unique courses: {0}/{1}'.format(min(c_uni), max(c_uni)))\n" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", "version": "2.7.6" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: preprocessing/1splitting.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "source": [ "# 基于rating2inter.ipynb生成的5-core交互图,Train/Validation/Test data splitting\n", "- Based on generated interactions, perform data splitting\n" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import os, csv\n", "import pandas as pd" ] }, { "cell_type": "code", "execution_count": null, "outputs": [], "source": [ "os.chdir('/home/enoche/MMRec/Sports14')\n", "os.getcwd()" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n", "is_executing": true } } }, { "cell_type": "markdown", "source": [ "## 直接加载现成的, Load interactions" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": 3, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "shape: (296337, 5)\n" ] }, { "data": { "text/plain": " userID itemID rating timestamp x_label\n0 1 0 5.0 1328140800 0\n1 2 0 4.0 1330387200 0\n2 3 0 4.0 1328400000 0\n3 4 0 4.0 1366675200 0", "text/html": "
" }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "rslt_file = 'sports14-indexed.inter'\n", "df = pd.read_csv(rslt_file, sep='\\t')\n", "print(f'shape: {df.shape}')\n", "df[:4]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 4, "outputs": [], "source": [ "import random\n", "import numpy as np" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 4, "outputs": [ { "data": { "text/plain": " userID itemID rating timestamp x_label\n154667 0 11981 2.0 1390694400 1\n295557 0 15852 5.0 1390694400 1\n189316 0 17787 3.0 1391990400 2\n151302 0 0 5.0 1390694400 1\n1820 0 3369 5.0 1405123200 2\n60040 0 13372 5.0 1391990400 2\n199192 0 5458 5.0 1405123200 2\n163234 0 3327 3.0 1391990400 2\n60837 1 2322 5.0 1337212800 0\n233786 1 4123 5.0 1354838400 0\n163460 1 14212 5.0 1368230400 0\n206628 1 1542 4.0 1302220800 0\n261633 1 8802 4.0 1368230400 0\n99658 1 9198 5.0 1318377600 0\n268935 1 7215 5.0 1285372800 0\n77956 1 13468 5.0 1328140800 0\n105444 1 2374 5.0 1391558400 1\n237889 1 7169 5.0 1302220800 0\n173295 1 6677 5.0 1318377600 0\n50074 1 15278 5.0 1344902400 0", "text/html": "
" }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "\n", "df = df.sample(frac=1).reset_index(drop=True)\n", "\n", "df.sort_values(by=['userID'], inplace=True)\n", "df[:20]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": null, "outputs": [], "source": [ "uid_field, iid_field = 'userID', 'itemID'\n", "\n", "uid_freq = df.groupby(uid_field)[iid_field]\n", "u_i_dict = {}\n", "for u, u_ls in uid_freq:\n", " u_i_dict[u] = list(u_ls)\n", "u_i_dict" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n", "is_executing": true } } }, { "cell_type": "code", "execution_count": 6, "outputs": [ { "data": { "text/plain": "[0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 1,\n 2,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 1,\n 2,\n 2,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 1,\n 1,\n 2,\n 2,\n 0,\n 0,\n 0,\n 1,\n 2,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 1,\n 2,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0]" }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "new_label = []\n", "u_ids_sorted = sorted(u_i_dict.keys())\n", "\n", "for u in u_ids_sorted:\n", " items = u_i_dict[u]\n", " n_items = len(items)\n", " if n_items < 10:\n", " tmp_ls = [0] * (n_items - 2) + [1] + [2]\n", " else:\n", " val_test_len = int(n_items * 0.2)\n", " train_len = n_items - val_test_len\n", " val_len = val_test_len // 2\n", " test_len = val_test_len - val_len\n", " tmp_ls = [0] * train_len + [1] * val_len + [2] * test_len\n", " new_label.extend(tmp_ls)\n", "\n", "new_label[:100]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 7, "outputs": [ { "data": { "text/plain": " userID itemID rating timestamp x_label\n154667 0 11981 2.0 1390694400 0\n295557 0 15852 5.0 1390694400 0\n189316 0 17787 3.0 1391990400 0\n151302 0 0 5.0 1390694400 0\n1820 0 3369 5.0 1405123200 0\n60040 0 13372 5.0 1391990400 0\n199192 0 5458 5.0 1405123200 1\n163234 0 3327 3.0 1391990400 2\n60837 1 2322 5.0 1337212800 0\n233786 1 4123 5.0 1354838400 0\n163460 1 14212 5.0 1368230400 0\n206628 1 1542 4.0 1302220800 0\n261633 1 8802 4.0 1368230400 0\n99658 1 9198 5.0 1318377600 0\n268935 1 7215 5.0 1285372800 0\n77956 1 13468 5.0 1328140800 0\n105444 1 2374 5.0 1391558400 0\n237889 1 7169 5.0 1302220800 0\n173295 1 6677 5.0 1318377600 0\n50074 1 15278 5.0 1344902400 0", "text/html": "
" }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df['x_label'] = new_label\n", "df[:20]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 12, "outputs": [ { "data": { "text/plain": "'beauty14-indexed'" }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "rslt_file[:-6]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 8, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "done!!!\n" ] } ], "source": [ "new_labeled_file = rslt_file[:-6] + '-v4.inter'\n", "df.to_csv(os.path.join('./', new_labeled_file), sep='\\t', index=False)\n", "print('done!!!')" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "markdown", "source": [], "metadata": { "collapsed": false } }, { "cell_type": "markdown", "source": [], "metadata": { "collapsed": false } }, { "cell_type": "markdown", "source": [ "## Reload" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%% md\n" } } }, { "cell_type": "code", "execution_count": 9, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "shape: (296337, 5)\n" ] }, { "data": { "text/plain": " userID itemID rating timestamp x_label\n0 0 11981 2.0 1390694400 0\n1 0 15852 5.0 1390694400 0\n2 0 17787 3.0 1391990400 0\n3 0 0 5.0 1390694400 0\n4 0 3369 5.0 1405123200 0\n5 0 13372 5.0 1391990400 0\n6 0 5458 5.0 1405123200 1\n7 0 3327 3.0 1391990400 2\n8 1 2322 5.0 1337212800 0\n9 1 4123 5.0 1354838400 0\n10 1 14212 5.0 1368230400 0\n11 1 1542 4.0 1302220800 0\n12 1 8802 4.0 1368230400 0\n13 1 9198 5.0 1318377600 0\n14 1 7215 5.0 1285372800 0\n15 1 13468 5.0 1328140800 0\n16 1 2374 5.0 1391558400 0\n17 1 7169 5.0 1302220800 0\n18 1 6677 5.0 1318377600 0\n19 1 15278 5.0 1344902400 0", "text/html": "
" }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "indexed_df = pd.read_csv(new_labeled_file, sep='\\t')\n", "print(f'shape: {indexed_df.shape}')\n", "indexed_df[:20]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 10, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "# of unique learners: 35598\n", "# of unique courses: 18357\n", "min/max of unique learners: 0/35597\n", "min/max of unique courses: 0/18356\n" ] } ], "source": [ "u_id_str, i_id_str = 'userID', 'itemID'\n", "u_uni = indexed_df[u_id_str].unique()\n", "c_uni = indexed_df[i_id_str].unique()\n", "\n", "print(f'# of unique learners: {len(u_uni)}')\n", "print(f'# of unique courses: {len(c_uni)}')\n", "\n", "print('min/max of unique learners: {0}/{1}'.format(min(u_uni), max(u_uni)))\n", "print('min/max of unique courses: {0}/{1}'.format(min(c_uni), max(c_uni)))\n" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", "version": "2.7.6" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: preprocessing/2reindex-feat.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "source": [ "# 利用rating2inter.ipynb中U/I的index对features进行一一对应(meta-text)\n", "- Reindex item feature ID with IDs generated in 0rating2inter.ipynb" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import os\n", "import pandas as pd" ] }, { "cell_type": "code", "execution_count": 2, "outputs": [ { "data": { "text/plain": "'/home/xin/XMMRec/Sports14'" }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "os.chdir('/home/xin/MMRec/Sports14')\n", "os.getcwd()" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 3, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "shape: (18357, 2)\n" ] }, { "data": { "text/plain": " asin itemID\n0 1881509818 0\n1 2094869245 1\n2 7245456259 2\n3 7245456313 3", "text/html": "
" }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# load item mapping\n", "i_id_mapping = 'i_id_mapping.csv'\n", "df = pd.read_csv(i_id_mapping, sep='\\t')\n", "print(f'shape: {df.shape}')\n", "df[:4]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 4, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "0 Extracting U-I interactions.\n", "Total records: (532197, 9)\n" ] }, { "data": { "text/plain": " asin title price \\\n0 0000032069 Adult Ballet Tutu Cheetah Pink 7.89 \n1 0000031909 Girls Ballet Tutu Neon Pink 7.00 \n2 0000032034 Adult Ballet Tutu Yellow 7.87 \n\n imUrl \\\n0 http://ecx.images-amazon.com/images/I/51EzU6qu... \n1 http://ecx.images-amazon.com/images/I/41xBoP0F... \n2 http://ecx.images-amazon.com/images/I/21GNUNIa... \n\n related brand \\\n0 {'also_bought': ['0000032050', 'B00D0DJAEG', '... BubuBibi \n1 {'also_bought': ['B002BZX8Z6', 'B00JHONN1S', '... Unknown \n2 {'also_bought': ['B00D2JSRFQ', '0000032042', '... BubuBibi \n\n categories \\\n0 [[Sports & Outdoors, Other Sports, Dance, Clot... \n1 [[Sports & Outdoors, Other Sports, Dance]] \n2 [[Sports & Outdoors, Other Sports, Dance, Clot... \n\n salesRank description \n0 NaN NaN \n1 {'Toys & Games': 201847} High quality 3 layer ballet tutu. 12 inches in... \n2 NaN NaN ", "text/html": "
" }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "\n", "import gzip, json\n", "meta_file = 'meta_Sports_and_Outdoors.json.gz'\n", "\n", "print('0 Extracting U-I interactions.')\n", "\n", "def parse(path):\n", " g = gzip.open(path, 'rb')\n", " for l in g:\n", " yield eval(l)\n", "\n", "def getDF(path):\n", " i = 0\n", " df = {}\n", " for d in parse(path):\n", " df[i] = d\n", " i += 1\n", " return pd.DataFrame.from_dict(df, orient='index')\n", "\n", "meta_df = getDF(meta_file)\n", "\n", "print(f'Total records: {meta_df.shape}')\n", "meta_df[:3]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 5, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "shape: (18357, 10)\n" ] }, { "data": { "text/plain": " asin title price \\\n132 1881509818 Ghost Inc Glock Armorers Tool 3/32 Punch 9.99 \n155 2094869245 5 LED Bicycle Rear Tail Red Bike Torch Laser B... 8.26 \n\n imUrl \\\n132 http://ecx.images-amazon.com/images/I/21iMxsyD... \n155 http://ecx.images-amazon.com/images/I/51RtwnJw... \n\n related brand \\\n132 {'also_bought': ['B000U3YWEM', 'B000U401J6', '... Ghost \n155 {'also_bought': ['B0081O93N2', 'B00EYTCHJA', '... \n\n categories \\\n132 [[Sports & Outdoors, Hunting & Fishing, Huntin... \n155 [[Sports & Outdoors, Cycling, Lights & Reflect... \n\n salesRank \\\n132 {'Sports & Outdoors': 172909} \n155 {'Sports & Outdoors': 14293} \n\n description itemID \n132 Ghost Armorer Tool (1). The GAT is made with a... 0 \n155 This newly-designed Laser tail light can emit ... 1 ", "text/html": "
" }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# remapping\n", "map_dict = dict(zip(df['asin'], df['itemID']))\n", "\n", "meta_df['itemID'] = meta_df['asin'].map(map_dict)\n", "meta_df.dropna(subset=['itemID'], inplace=True)\n", "meta_df['itemID'] = meta_df['itemID'].astype('int64')\n", "#meta_df['description'] = meta_df['description'].fillna(\" \")\n", "meta_df.sort_values(by=['itemID'], inplace=True)\n", "\n", "print(f'shape: {meta_df.shape}')\n", "meta_df[:2]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 6, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "new column names: ['itemID', 'asin', 'title', 'price', 'imUrl', 'related', 'brand', 'categories', 'salesRank', 'description']\n" ] } ], "source": [ "ori_cols = meta_df.columns.tolist()\n", "\n", "ret_cols = [ori_cols[-1]] + ori_cols[:-1]\n", "print(f'new column names: {ret_cols}')" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 7, "outputs": [ { "data": { "text/plain": " asin title price \\\n132 1881509818 Ghost Inc Glock Armorers Tool 3/32 Punch 9.99 \n155 2094869245 5 LED Bicycle Rear Tail Red Bike Torch Laser B... 8.26 \n201 7245456259 Black Mountain Products Single Resistance Band... 10.49 \n\n imUrl \\\n132 http://ecx.images-amazon.com/images/I/21iMxsyD... \n155 http://ecx.images-amazon.com/images/I/51RtwnJw... \n201 http://ecx.images-amazon.com/images/I/411Ikpf1... \n\n related brand \\\n132 {'also_bought': ['B000U3YWEM', 'B000U401J6', '... Ghost \n155 {'also_bought': ['B0081O93N2', 'B00EYTCHJA', '... \n201 {'also_bought': ['B00DDBS2JE', 'B00H1KNHE8', '... Black Mountain \n\n categories \\\n132 [[Sports & Outdoors, Hunting & Fishing, Huntin... \n155 [[Sports & Outdoors, Cycling, Lights & Reflect... \n201 [[Sports & Outdoors, Exercise & Fitness, Acces... \n\n salesRank \\\n132 {'Sports & Outdoors': 172909} \n155 {'Sports & Outdoors': 14293} \n201 {'Sports & Outdoors': 1010} \n\n description itemID \n132 Ghost Armorer Tool (1). The GAT is made with a... 0 \n155 This newly-designed Laser tail light can emit ... 1 \n201 Black Mountain Products single resistance band... 2 ", "text/html": "
" }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "meta_df[:3]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 8, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "done!\n" ] } ], "source": [ "ret_df = meta_df[ret_cols]\n", "# dump\n", "ret_df.to_csv(os.path.join('./', 'meta-sports14.csv'), index=False)\n", "print('done!')" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "markdown", "source": [], "metadata": { "collapsed": false } }, { "cell_type": "markdown", "source": [ "## Reload" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": 9, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "shape: (18357, 10)\n" ] }, { "data": { "text/plain": " itemID asin title \\\n0 0 1881509818 Ghost Inc Glock Armorers Tool 3/32 Punch \n1 1 2094869245 5 LED Bicycle Rear Tail Red Bike Torch Laser B... \n2 2 7245456259 Black Mountain Products Single Resistance Band... \n3 3 7245456313 Black Mountain Products Resistance Band Set wi... \n\n price imUrl \\\n0 9.99 http://ecx.images-amazon.com/images/I/21iMxsyD... \n1 8.26 http://ecx.images-amazon.com/images/I/51RtwnJw... \n2 10.49 http://ecx.images-amazon.com/images/I/411Ikpf1... \n3 32.99 http://ecx.images-amazon.com/images/I/51FdHlZS... \n\n related brand \\\n0 {'also_bought': ['B000U3YWEM', 'B000U401J6', '... Ghost \n1 {'also_bought': ['B0081O93N2', 'B00EYTCHJA', '... NaN \n2 {'also_bought': ['B00DDBS2JE', 'B00H1KNHE8', '... Black Mountain \n3 {'also_bought': ['1612431712', 'B00GSBMW2Y', '... Black Mountain \n\n categories \\\n0 [['Sports & Outdoors', 'Hunting & Fishing', 'H... \n1 [['Sports & Outdoors', 'Cycling', 'Lights & Re... \n2 [['Sports & Outdoors', 'Exercise & Fitness', '... \n3 [['Sports & Outdoors', 'Exercise & Fitness', '... \n\n salesRank \\\n0 {'Sports & Outdoors': 172909} \n1 {'Sports & Outdoors': 14293} \n2 {'Sports & Outdoors': 1010} \n3 {'Sports & Outdoors': 15} \n\n description \n0 Ghost Armorer Tool (1). The GAT is made with a... \n1 This newly-designed Laser tail light can emit ... \n2 Black Mountain Products single resistance band... \n3 [if gte mso 9]> \n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
\n" }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "indexed_df = pd.read_csv('meta-sports14.csv')\n", "print(f'shape: {indexed_df.shape}')\n", "indexed_df[:4]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 10, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "# of unique items: 18357\n", "min/max of unique learners: 0/18356\n" ] } ], "source": [ "## Reload\n", "\n", "i_uni = indexed_df['itemID'].unique()\n", "\n", "print(f'# of unique items: {len(i_uni)}')\n", "\n", "print('min/max of unique learners: {0}/{1}'.format(min(i_uni), max(i_uni)))" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", "version": "2.7.6" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: preprocessing/3feat-encoder.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "source": [ "# Sports14 Text/Image Feature Extraction" ], "metadata": { "collapsed": false } }, { "cell_type": "markdown", "source": [], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": 1, "outputs": [], "source": [ "\n", "import os\n", "import numpy as np\n", "import pandas as pd" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": null, "outputs": [], "source": [ "os.chdir('/home/xin/MMRec/Sports14')\n", "os.getcwd()" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n", "is_executing": true } } }, { "cell_type": "markdown", "source": [ "## Load text data" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": 3, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "data loaded!\n", "shape: (18357, 10)\n" ] }, { "data": { "text/plain": " itemID asin title \\\n0 0 1881509818 Ghost Inc Glock Armorers Tool 3/32 Punch \n1 1 2094869245 5 LED Bicycle Rear Tail Red Bike Torch Laser B... \n2 2 7245456259 Black Mountain Products Single Resistance Band... \n\n price imUrl \\\n0 9.99 http://ecx.images-amazon.com/images/I/21iMxsyD... \n1 8.26 http://ecx.images-amazon.com/images/I/51RtwnJw... \n2 10.49 http://ecx.images-amazon.com/images/I/411Ikpf1... \n\n related brand \\\n0 {'also_bought': ['B000U3YWEM', 'B000U401J6', '... Ghost \n1 {'also_bought': ['B0081O93N2', 'B00EYTCHJA', '... NaN \n2 {'also_bought': ['B00DDBS2JE', 'B00H1KNHE8', '... Black Mountain \n\n categories \\\n0 [['Sports & Outdoors', 'Hunting & Fishing', 'H... \n1 [['Sports & Outdoors', 'Cycling', 'Lights & Re... \n2 [['Sports & Outdoors', 'Exercise & Fitness', '... \n\n salesRank \\\n0 {'Sports & Outdoors': 172909} \n1 {'Sports & Outdoors': 14293} \n2 {'Sports & Outdoors': 1010} \n\n description \n0 Ghost Armorer Tool (1). The GAT is made with a... \n1 This newly-designed Laser tail light can emit ... \n2 Black Mountain Products single resistance band... ", "text/html": "
" }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "i_id, desc_str = 'itemID', 'description'\n", "\n", "file_path = './'\n", "file_name = 'meta-sports14.csv'\n", "\n", "meta_file = os.path.join(file_path, file_name)\n", "\n", "df = pd.read_csv(meta_file)\n", "df.sort_values(by=[i_id], inplace=True)\n", "\n", "print('data loaded!')\n", "print(f'shape: {df.shape}')\n", "\n", "df[:3]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 4, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(91, 10)\n", "(2659, 10)\n", "(40, 10)\n", "(40, 10)\n", "(0, 10)\n" ] } ], "source": [ "\n", "# sentences: title + brand + category + description | All have title + description\n", "\n", "title_na_df = df[df['title'].isnull()]\n", "print(title_na_df.shape)\n", "\n", "desc_na_df = df[df['description'].isnull()]\n", "print(desc_na_df.shape)\n", "\n", "na_df = df[df['description'].isnull() & df['title'].isnull()]\n", "print(na_df.shape)\n", "\n", "na3_df = df[df['description'].isnull() & df['title'].isnull() & df['brand'].isnull()]\n", "print(na3_df.shape)\n", "\n", "na4_df = df[df['description'].isnull() & df['title'].isnull() & df['brand'].isnull() & df['categories'].isnull()]\n", "print(na4_df.shape)" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 5, "outputs": [], "source": [ "\n", "df[desc_str] = df[desc_str].fillna(\" \")\n", "df['title'] = df['title'].fillna(\" \")\n", "df['brand'] = df['brand'].fillna(\" \")\n", "df['categories'] = df['categories'].fillna(\" \")\n" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 6, "outputs": [ { "data": { "text/plain": "['Ghost Inc Glock Armorers Tool 3/32 Punch Ghost Sports & Outdoors Hunting & Fishing Hunting Gun Maintenance Gunsmithing Tools Ghost Armorer Tool (1). The GAT is made with a spring steel punch. The diameter is 3/32 of an inch or 2.5mm, this is the same as the OEM tool size. The difference is you will be able to press harder without bending the shaft of this punch. Just a better tool to work on your Glock with.',\n '5 LED Bicycle Rear Tail Red Bike Torch Laser Beam Lamp Light Sports & Outdoors Cycling Lights & Reflectors Taillights This newly-designed Laser tail light can emit two parallel lines, to form a virtual lane together with the moving of bicycle on the road. LED flash light and two lines not only enhance the waring effect strongly and greatly but also improve the safety of night riding.',\n 'Black Mountain Products Single Resistance Band - Door Anchor and Starter Guide Included Black Mountain Sports & Outdoors Exercise & Fitness Accessories Exercise Bands Black Mountain Products single resistance bands are made out of the highest quality rubber to ensure maximum life and are 99 percent latex free! These bands are ideal for physical therapy, exercise, weight loss, Pilates, muscle toning, muscle strengthening, stretching, rehabilitation, and general health and fitness. B.M.P. resistance bands are great for home use, gym use, offices, and are ideal for travel. B.M.P. single resistance bands are a great alternative to conventional weights and exercise equipment. 
All Black Mountain Products Resistance bands come with a manufactures warranty.',\n 'Black Mountain Products Resistance Band Set with Door Anchor, Ankle Strap, Exercise Chart, and Resistance Band Carrying Case Black Mountain Sports & Outdoors Exercise & Fitness Accessories Exercise Bands [if gte mso 9]> Normal 0 false false false EN-US X-NONE X-NONE \n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
\n" }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df[:5]" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 12, "outputs": [], "source": [ "import array\n", "\n", "def readImageFeatures(path):\n", " f = open(path, 'rb')\n", " while True:\n", " asin = f.read(10).decode('UTF-8')\n", " if asin == '': break\n", " a = array.array('f')\n", " a.fromfile(f, 4096)\n", " yield asin, a.tolist()" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 13, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "# of items not in processed image features: 180\n", "done!\n" ] } ], "source": [ "\n", "img_data = readImageFeatures(\"image_features_Sports_and_Outdoors.b\")\n", "item2id = dict(zip(df['asin'], df['itemID']))\n", "\n", "feats = {}\n", "avg = []\n", "for d in img_data:\n", " if d[0] in item2id:\n", " feats[int(item2id[d[0]])] = d[1]\n", " avg.append(d[1])\n", "avg = np.array(avg).mean(0).tolist()\n", "\n", "ret = []\n", "non_no = []\n", "for i in range(len(item2id)):\n", " if i in feats:\n", " ret.append(feats[i])\n", " else:\n", " non_no.append(i)\n", " ret.append(avg)\n", "\n", "print('# of items not in processed image features:', len(non_no))\n", "assert len(ret) == len(item2id)\n", "np.save('image_feat.npy', np.array(ret))\n", "np.savetxt(\"missed_img_itemIDs.csv\", non_no, delimiter =\",\", fmt ='%d')\n", "print('done!')" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", "version": "2.7.6" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: preprocessing/README.md ================================================ # Preprocessing from raw data 从原始数据处理 - The following preprocessing steps can be quite tedious. Please post issues if you cannot run the scripts. - datasets: [Amazon](http://jmcauley.ucsd.edu/data/amazon/links.html) -- Rating file in `Files/Small subsets for experimentation` -- Meta files in `Per-category files`, [metadata], [image features] There has been an issue with the dataset site lately, as it automatically redirects to an updated version of the dataset. Keep pressing `ESC` to stop the redirecting action. ## Step by step 1. Performing 5-core filtering, re-indexing - `run 0rating2inter.ipynb` 2. Train/valid/test data splitting - `run 1spliting.ipynb` 3. Reindexing feature IDs with generated IDs in step 1 - `run 2reindex-feat.ipynb` 4. Encoding text/image features - `run 3feat-encoder.ipynb` 5. Filling your data description file `*.yaml` under `src/configs/dataset` with the generated file names `*.inter`, `*-feat.npy`, etc. 6. Specifying your evaluated dataset by cmd: `python -d sports -m BM3`. ## DualGNN requires additional operation to generate the u-u graph 1. Run `dualgnn-gen-u-u-matrix.py` on a dataset `baby`: `python dualgnn-gen-u-u-matrix.py -d baby` 2. The generated u-u graph should be located in the same dir as the dataset. 
================================================ FILE: preprocessing/dualgnn-gen-u-u-matrix.py ================================================
# Corresponds to the Preprocess-ml-imdb.py file
import numpy as np
from collections import defaultdict
from tqdm import tqdm
import torch
import pandas as pd
import os
import yaml
import argparse


def gen_user_matrix(all_edge, no_users):
    # build user -> set(items) from the training interactions
    edge_dict = defaultdict(set)
    for edge in all_edge:
        user, item = edge
        edge_dict[user].add(item)
    min_user = 0  # user/item IDs start from 0 after re-indexing
    num_user = no_users
    user_graph_matrix = torch.zeros(num_user, num_user)
    key_list = list(edge_dict.keys())
    key_list.sort()
    bar = tqdm(total=len(key_list))
    # count co-interacted items for every user pair (symmetric matrix)
    for head in range(len(key_list)):
        bar.update(1)
        for rear in range(head + 1, len(key_list)):
            head_key = key_list[head]
            rear_key = key_list[rear]
            item_head = edge_dict[head_key]
            item_rear = edge_dict[rear_key]
            inter_len = len(item_head.intersection(item_rear))
            if inter_len > 0:
                user_graph_matrix[head_key - min_user][rear_key - min_user] = inter_len
                user_graph_matrix[rear_key - min_user][head_key - min_user] = inter_len
    bar.close()
    return user_graph_matrix


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', '-d', type=str, default='games', help='name of dataset')
    args = parser.parse_args()
    dataset_name = args.dataset
    print(f'Generating u-u matrix for {dataset_name} ...\n')
    config = {}
    os.chdir('../src')
    cur_dir = os.getcwd()
    con_dir = os.path.join(cur_dir, 'configs')  # config dir
    overall_config_file = os.path.join(con_dir, "overall.yaml")
    dataset_config_file = os.path.join(con_dir, "dataset", "{}.yaml".format(dataset_name))
    conf_files = [overall_config_file, dataset_config_file]
    # load configs
    for file in conf_files:
        if os.path.isfile(file):
            with open(file, 'r', encoding='utf-8') as f:
                tmp_d = yaml.safe_load(f)
                config.update(tmp_d)

    dataset_path = os.path.abspath(config['data_path'] + dataset_name)
    print('data path:\t', dataset_path)
    uid_field = config['USER_ID_FIELD']
    iid_field = config['ITEM_ID_FIELD']
    train_df = pd.read_csv(os.path.join(dataset_path, config['inter_file_name']), sep='\t')
    num_user = len(pd.unique(train_df[uid_field]))
    train_df = train_df[train_df['x_label'] == 0].copy()  # keep the training split only
    train_data = train_df[[uid_field, iid_field]].to_numpy()

    # generate the user-user co-interaction matrix
    user_graph_matrix = gen_user_matrix(train_data, num_user)
    user_graph = user_graph_matrix
    user_num = torch.zeros(num_user)

    user_graph_dict = {}
    item_graph_dict = {}
    edge_list_i = []
    edge_list_j = []

    # number of co-interacting neighbors per user
    for i in range(num_user):
        user_num[i] = len(torch.nonzero(user_graph[i]))
        print("this is ", i, "num", user_num[i])

    # keep at most the top-200 neighbors (and their co-interaction counts) per user
    for i in range(num_user):
        if user_num[i] <= 200:
            user_i = torch.topk(user_graph[i], int(user_num[i]))
        else:
            user_i = torch.topk(user_graph[i], 200)
        edge_list_i = user_i.indices.numpy().tolist()
        edge_list_j = user_i.values.numpy().tolist()
        edge_list = [edge_list_i, edge_list_j]
        user_graph_dict[i] = edge_list

    np.save(os.path.join(dataset_path, config['user_graph_dict_file']), user_graph_dict, allow_pickle=True)
================================================ FILE: requirements.txt ================================================
numpy==1.21.5 pandas==1.3.5 python==3.7.11 scipy==1.7.3 torch==1.11.0 pyyaml==6.0 ================================================ FILE: src/common/abstract_recommender.py ================================================ # coding: utf-8 # @email : enoche.chow@gmail.com import os import numpy as np import torch import torch.nn as nn class AbstractRecommender(nn.Module): r"""Base class for all models """ def pre_epoch_processing(self): pass def post_epoch_processing(self): pass def calculate_loss(self, interaction): r"""Calculate the training loss for a batch data. Args: interaction (Interaction): Interaction class of the batch. Returns: torch.Tensor: Training loss, shape: [] """ raise NotImplementedError def predict(self, interaction): r"""Predict the scores between users and items. Args: interaction (Interaction): Interaction class of the batch. Returns: torch.Tensor: Predicted scores for given users and items, shape: [batch_size] """ raise NotImplementedError def full_sort_predict(self, interaction): r"""full sort prediction function. Given users, calculate the scores between users and all candidate items. Args: interaction (Interaction): Interaction class of the batch. Returns: torch.Tensor: Predicted scores for given users and all candidate items, shape: [n_batch_users * n_candidate_items] """ raise NotImplementedError # # def __str__(self): # """ # Model prints with number of trainable parameters # """ # model_parameters = filter(lambda p: p.requires_grad, self.parameters()) # params = sum([np.prod(p.size()) for p in model_parameters]) # return super().__str__() + '\nTrainable parameters: {}'.format(params) def __str__(self): """ Model prints with number of trainable parameters """ model_parameters = self.parameters() params = sum([np.prod(p.size()) for p in model_parameters]) return super().__str__() + '\nTrainable parameters: {}'.format(params) class GeneralRecommender(AbstractRecommender): """This is a abstract general recommender. All the general model should implement this class. The base general recommender class provide the basic dataset and parameters information. """ def __init__(self, config, dataloader): super(GeneralRecommender, self).__init__() # load dataset info self.USER_ID = config['USER_ID_FIELD'] self.ITEM_ID = config['ITEM_ID_FIELD'] self.NEG_ITEM_ID = config['NEG_PREFIX'] + self.ITEM_ID self.n_users = dataloader.dataset.get_user_num() self.n_items = dataloader.dataset.get_item_num() # load parameters info self.batch_size = config['train_batch_size'] self.device = config['device'] # load encoded features here self.v_feat, self.t_feat = None, None if not config['end2end'] and config['is_multimodal_model']: dataset_path = os.path.abspath(config['data_path'] + config['dataset']) # if file exist? 
v_feat_file_path = os.path.join(dataset_path, config['vision_feature_file']) t_feat_file_path = os.path.join(dataset_path, config['text_feature_file']) if os.path.isfile(v_feat_file_path): self.v_feat = torch.from_numpy(np.load(v_feat_file_path, allow_pickle=True)).type(torch.FloatTensor).to( self.device) if os.path.isfile(t_feat_file_path): self.t_feat = torch.from_numpy(np.load(t_feat_file_path, allow_pickle=True)).type(torch.FloatTensor).to( self.device) assert self.v_feat is not None or self.t_feat is not None, 'Features all NONE' ================================================ FILE: src/common/encoders.py ================================================ import copy import math import numpy as np import torch import torch.nn as nn from common.abstract_recommender import GeneralRecommender import scipy.sparse as sp class LightGCN_Encoder(GeneralRecommender): def __init__(self, config, dataset): super(LightGCN_Encoder, self).__init__(config, dataset) # load dataset info self.interaction_matrix = dataset.inter_matrix( form='coo').astype(np.float32) self.user_count = self.n_users self.item_count = self.n_items self.latent_size = config['embedding_size'] self.n_layers = 3 if config['n_layers'] is None else config['n_layers'] self.layers = [self.latent_size] * self.n_layers self.drop_ratio = 1.0 self.drop_flag = True self.embedding_dict = self._init_model() self.sparse_norm_adj = self.get_norm_adj_mat().to(self.device) def _init_model(self): initializer = nn.init.xavier_uniform_ embedding_dict = nn.ParameterDict({ 'user_emb': nn.Parameter(initializer(torch.empty(self.user_count, self.latent_size))), 'item_emb': nn.Parameter(initializer(torch.empty(self.item_count, self.latent_size))) }) return embedding_dict def get_norm_adj_mat(self): r"""Get the normalized interaction matrix of users and items. Construct the square matrix from the training data and normalize it using the laplace matrix. .. math:: A_{hat} = D^{-0.5} \times A \times D^{-0.5} Returns: Sparse tensor of the normalized interaction matrix. """ # build adj matrix A = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32) inter_M = self.interaction_matrix inter_M_t = self.interaction_matrix.transpose() data_dict = dict(zip(zip(inter_M.row, inter_M.col+self.n_users), [1]*inter_M.nnz)) data_dict.update(dict(zip(zip(inter_M_t.row+self.n_users, inter_M_t.col), [1]*inter_M_t.nnz))) A._update(data_dict) # norm adj matrix sumArr = (A > 0).sum(axis=1) # add epsilon to avoid Devide by zero Warning diag = np.array(sumArr.flatten())[0] + 1e-7 diag = np.power(diag, -0.5) D = sp.diags(diag) L = D * A * D # covert norm_adj matrix to tensor L = sp.coo_matrix(L) row = L.row col = L.col i = torch.LongTensor([row, col]) data = torch.FloatTensor(L.data) SparseL = torch.sparse.FloatTensor(i, data, torch.Size(L.shape)) return SparseL def sparse_dropout(self, x, rate, noise_shape): random_tensor = 1 - rate random_tensor += torch.rand(noise_shape).to(self.device) dropout_mask = torch.floor(random_tensor).type(torch.bool) i = x._indices() v = x._values() i = i[:, dropout_mask] v = v[dropout_mask] out = torch.sparse.FloatTensor(i, v, x.shape).to(self.device) return out * (1. 
/ (1 - rate)) def forward(self, inputs): A_hat = self.sparse_dropout(self.sparse_norm_adj, np.random.random() * self.drop_ratio, self.sparse_norm_adj._nnz()) if self.drop_flag else self.sparse_norm_adj ego_embeddings = torch.cat([self.embedding_dict['user_emb'], self.embedding_dict['item_emb']], 0) all_embeddings = [ego_embeddings] for k in range(len(self.layers)): ego_embeddings = torch.sparse.mm(A_hat, ego_embeddings) all_embeddings += [ego_embeddings] all_embeddings = torch.stack(all_embeddings, dim=1) all_embeddings = torch.mean(all_embeddings, dim=1) user_all_embeddings = all_embeddings[:self.user_count, :] item_all_embeddings = all_embeddings[self.user_count:, :] users, items = inputs[0], inputs[1] user_embeddings = user_all_embeddings[users, :] item_embeddings = item_all_embeddings[items, :] return user_embeddings, item_embeddings @torch.no_grad() def get_embedding(self): A_hat = self.sparse_norm_adj ego_embeddings = torch.cat([self.embedding_dict['user_emb'], self.embedding_dict['item_emb']], 0) all_embeddings = [ego_embeddings] for k in range(len(self.layers)): ego_embeddings = torch.sparse.mm(A_hat, ego_embeddings) all_embeddings += [ego_embeddings] all_embeddings = torch.stack(all_embeddings, dim=1) all_embeddings = torch.mean(all_embeddings, dim=1) user_all_embeddings = all_embeddings[:self.user_count, :] item_all_embeddings = all_embeddings[self.user_count:, :] return user_all_embeddings, item_all_embeddings ================================================ FILE: src/common/init.py ================================================ # coding: utf-8 # @email : enoche.chow@gmail.com import torch.nn as nn from torch.nn.init import xavier_normal_, xavier_uniform_, constant_ def xavier_normal_initialization(module): r""" using `xavier_normal_`_ in PyTorch to initialize the parameters in nn.Embedding and nn.Linear layers. For bias in nn.Linear layers, using constant 0 to initialize. .. _`xavier_normal_`: https://pytorch.org/docs/stable/nn.init.html?highlight=xavier_normal_#torch.nn.init.xavier_normal_ Examples: >>> self.apply(xavier_normal_initialization) """ if isinstance(module, nn.Embedding): xavier_normal_(module.weight.data) elif isinstance(module, nn.Linear): xavier_normal_(module.weight.data) if module.bias is not None: constant_(module.bias.data, 0) def xavier_uniform_initialization(module): r""" using `xavier_uniform_`_ in PyTorch to initialize the parameters in nn.Embedding and nn.Linear layers. For bias in nn.Linear layers, using constant 0 to initialize. .. _`xavier_uniform_`: https://pytorch.org/docs/stable/nn.init.html?highlight=xavier_uniform_#torch.nn.init.xavier_uniform_ Examples: >>> self.apply(xavier_uniform_initialization) """ if isinstance(module, nn.Embedding) or isinstance(module, nn.Parameter): xavier_uniform_(module.weight.data) elif isinstance(module, nn.Linear): xavier_uniform_(module.weight.data) if module.bias is not None: constant_(module.bias.data, 0) ================================================ FILE: src/common/loss.py ================================================ # coding: utf-8 # @email : enoche.chow@gmail.com import torch import torch.nn as nn class BPRLoss(nn.Module): """ BPRLoss, based on Bayesian Personalized Ranking Args: - gamma(float): Small value to avoid division by zero Shape: - Pos_score: (N) - Neg_score: (N), same shape as the Pos_score - Output: scalar. 
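In symbols, the loss implemented below is loss = -mean(log(gamma + sigmoid(pos_score - neg_score))), i.e. the standard BPR objective with a small gamma guarding against log(0).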
Examples:: >>> loss = BPRLoss() >>> pos_score = torch.randn(3, requires_grad=True) >>> neg_score = torch.randn(3, requires_grad=True) >>> output = loss(pos_score, neg_score) >>> output.backward() """ def __init__(self, gamma=1e-10): super(BPRLoss, self).__init__() self.gamma = gamma def forward(self, pos_score, neg_score): loss = - torch.log(self.gamma + torch.sigmoid(pos_score - neg_score)).mean() return loss class EmbLoss(nn.Module): """ EmbLoss, regularization on embeddings """ def __init__(self, norm=2): super(EmbLoss, self).__init__() self.norm = norm def forward(self, *embeddings): emb_loss = torch.zeros(1).to(embeddings[-1].device) for embedding in embeddings: emb_loss += torch.norm(embedding, p=self.norm) emb_loss /= embeddings[-1].shape[0] return emb_loss class L2Loss(nn.Module): def __init__(self): super(L2Loss, self).__init__() def forward(self, *embeddings): l2_loss = torch.zeros(1).to(embeddings[-1].device) for embedding in embeddings: l2_loss += torch.sum(embedding**2)*0.5 return l2_loss ================================================ FILE: src/common/trainer.py ================================================ # coding: utf-8 # @email: enoche.chow@gmail.com r""" ################################ """ import os import itertools import torch import torch.optim as optim from torch.nn.utils.clip_grad import clip_grad_norm_ import numpy as np import matplotlib.pyplot as plt from time import time from logging import getLogger from utils.utils import get_local_time, early_stopping, dict2str from utils.topk_evaluator import TopKEvaluator class AbstractTrainer(object): r"""Trainer Class is used to manage the training and evaluation processes of recommender system models. AbstractTrainer is an abstract class in which the fit() and evaluate() method should be implemented according to different training and evaluation strategies. """ def __init__(self, config, model): self.config = config self.model = model def fit(self, train_data): r"""Train the model based on the train data. """ raise NotImplementedError('Method [next] should be implemented.') def evaluate(self, eval_data): r"""Evaluate the model based on the eval data. """ raise NotImplementedError('Method [next] should be implemented.') class Trainer(AbstractTrainer): r"""The basic Trainer for basic training and evaluation strategies in recommender systems. This class defines common functions for training and evaluation processes of most recommender system models, including fit(), evaluate(), and some other features helpful for model training and evaluation. Generally speaking, this class can serve most recommender system models, If the training process of the model is to simply optimize a single loss without involving any complex training strategies, such as adversarial learning, pre-training and so on. Initializing the Trainer needs two parameters: `config` and `model`. `config` records the parameters information for controlling training and evaluation, such as `learning_rate`, `epochs`, `eval_step` and so on. More information can be found in [placeholder]. `model` is the instantiated object of a Model Class. 
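A minimal usage sketch (assuming `config`, `model` and the dataloaders have already been built, as done in utils/quick_start.py):

    trainer = Trainer(config, model)
    best_valid_score, best_valid_result, best_test_upon_valid = trainer.fit(train_data, valid_data=valid_data, test_data=test_data)
    test_result = trainer.evaluate(test_data, is_test=True)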
""" def __init__(self, config, model, mg=False): super(Trainer, self).__init__(config, model) self.logger = getLogger() self.learner = config['learner'] self.learning_rate = config['learning_rate'] self.epochs = config['epochs'] self.eval_step = min(config['eval_step'], self.epochs) self.stopping_step = config['stopping_step'] self.clip_grad_norm = config['clip_grad_norm'] self.valid_metric = config['valid_metric'].lower() self.valid_metric_bigger = config['valid_metric_bigger'] self.test_batch_size = config['eval_batch_size'] self.device = config['device'] self.weight_decay = 0.0 if config['weight_decay'] is not None: wd = config['weight_decay'] self.weight_decay = eval(wd) if isinstance(wd, str) else wd self.req_training = config['req_training'] self.start_epoch = 0 self.cur_step = 0 tmp_dd = {} for j, k in list(itertools.product(config['metrics'], config['topk'])): tmp_dd[f'{j.lower()}@{k}'] = 0.0 self.best_valid_score = -1 self.best_valid_result = tmp_dd self.best_test_upon_valid = tmp_dd self.train_loss_dict = dict() self.optimizer = self._build_optimizer() #fac = lambda epoch: 0.96 ** (epoch / 50) lr_scheduler = config['learning_rate_scheduler'] # check zero? fac = lambda epoch: lr_scheduler[0] ** (epoch / lr_scheduler[1]) scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=fac) self.lr_scheduler = scheduler self.eval_type = config['eval_type'] self.evaluator = TopKEvaluator(config) self.item_tensor = None self.tot_item_num = None self.mg = mg self.alpha1 = config['alpha1'] self.alpha2 = config['alpha2'] self.beta = config['beta'] def _build_optimizer(self): r"""Init the Optimizer Returns: torch.optim: the optimizer """ if self.learner.lower() == 'adam': optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) elif self.learner.lower() == 'sgd': optimizer = optim.SGD(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) elif self.learner.lower() == 'adagrad': optimizer = optim.Adagrad(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) elif self.learner.lower() == 'rmsprop': optimizer = optim.RMSprop(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) else: self.logger.warning('Received unrecognized optimizer, set default Adam optimizer') optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate) return optimizer def _train_epoch(self, train_data, epoch_idx, loss_func=None): r"""Train the model in an epoch Args: train_data (DataLoader): The train data. epoch_idx (int): The current epoch id. loss_func (function): The loss function of :attr:`model`. If it is ``None``, the loss function will be :attr:`self.model.calculate_loss`. Defaults to ``None``. Returns: float/tuple: The sum of loss returned by all batches in this epoch. If the loss in each batch contains multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a tuple which includes the sum of loss in each part. 
""" if not self.req_training: return 0.0, [] self.model.train() loss_func = loss_func or self.model.calculate_loss total_loss = None loss_batches = [] for batch_idx, interaction in enumerate(train_data): self.optimizer.zero_grad() second_inter = interaction.clone() losses = loss_func(interaction) if isinstance(losses, tuple): loss = sum(losses) loss_tuple = tuple(per_loss.item() for per_loss in losses) total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple))) else: loss = losses total_loss = losses.item() if total_loss is None else total_loss + losses.item() if self._check_nan(loss): self.logger.info('Loss is nan at epoch: {}, batch index: {}. Exiting.'.format(epoch_idx, batch_idx)) return loss, torch.tensor(0.0) if self.mg and batch_idx % self.beta == 0: first_loss = self.alpha1 * loss first_loss.backward() self.optimizer.step() self.optimizer.zero_grad() losses = loss_func(second_inter) if isinstance(losses, tuple): loss = sum(losses) else: loss = losses if self._check_nan(loss): self.logger.info('Loss is nan at epoch: {}, batch index: {}. Exiting.'.format(epoch_idx, batch_idx)) return loss, torch.tensor(0.0) second_loss = -1 * self.alpha2 * loss second_loss.backward() else: loss.backward() if self.clip_grad_norm: clip_grad_norm_(self.model.parameters(), **self.clip_grad_norm) self.optimizer.step() loss_batches.append(loss.detach()) # for test #if batch_idx == 0: # break return total_loss, loss_batches def _valid_epoch(self, valid_data): r"""Valid the model with valid data Args: valid_data (DataLoader): the valid data Returns: float: valid score dict: valid result """ valid_result = self.evaluate(valid_data) valid_score = valid_result[self.valid_metric] if self.valid_metric else valid_result['NDCG@20'] return valid_score, valid_result def _check_nan(self, loss): if torch.isnan(loss): #raise ValueError('Training loss is nan') return True def _generate_train_loss_output(self, epoch_idx, s_time, e_time, losses): train_loss_output = 'epoch %d training [time: %.2fs, ' % (epoch_idx, e_time - s_time) if isinstance(losses, tuple): train_loss_output = ', '.join('train_loss%d: %.4f' % (idx + 1, loss) for idx, loss in enumerate(losses)) else: train_loss_output += 'train loss: %.4f' % losses return train_loss_output + ']' def fit(self, train_data, valid_data=None, test_data=None, saved=False, verbose=True): r"""Train the model based on the train data and the valid data. Args: train_data (DataLoader): the train data valid_data (DataLoader, optional): the valid data, default: None. If it's None, the early_stopping is invalid. test_data (DataLoader, optional): None verbose (bool, optional): whether to write training and evaluation information to logger, default: True saved (bool, optional): whether to save the model parameters, default: True Returns: (float, dict): best valid score and best valid result. 
If valid_data is None, it returns (-1, None) """ for epoch_idx in range(self.start_epoch, self.epochs): # train training_start_time = time() self.model.pre_epoch_processing() train_loss, _ = self._train_epoch(train_data, epoch_idx) if torch.is_tensor(train_loss): # get nan loss break #for param_group in self.optimizer.param_groups: # print('======lr: ', param_group['lr']) self.lr_scheduler.step() self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss training_end_time = time() train_loss_output = \ self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss) post_info = self.model.post_epoch_processing() if verbose: self.logger.info(train_loss_output) if post_info is not None: self.logger.info(post_info) # eval: To ensure the test result is the best model under validation data, set self.eval_step == 1 if (epoch_idx + 1) % self.eval_step == 0: valid_start_time = time() valid_score, valid_result = self._valid_epoch(valid_data) self.best_valid_score, self.cur_step, stop_flag, update_flag = early_stopping( valid_score, self.best_valid_score, self.cur_step, max_step=self.stopping_step, bigger=self.valid_metric_bigger) valid_end_time = time() valid_score_output = "epoch %d evaluating [time: %.2fs, valid_score: %f]" % \ (epoch_idx, valid_end_time - valid_start_time, valid_score) valid_result_output = 'valid result: \n' + dict2str(valid_result) # test _, test_result = self._valid_epoch(test_data) if verbose: self.logger.info(valid_score_output) self.logger.info(valid_result_output) self.logger.info('test result: \n' + dict2str(test_result)) if update_flag: update_output = '██ ' + self.config['model'] + '--Best validation results updated!!!' if verbose: self.logger.info(update_output) self.best_valid_result = valid_result self.best_test_upon_valid = test_result if stop_flag: stop_output = '+++++Finished training, best eval result in epoch %d' % \ (epoch_idx - self.cur_step * self.eval_step) if verbose: self.logger.info(stop_output) break return self.best_valid_score, self.best_valid_result, self.best_test_upon_valid @torch.no_grad() def evaluate(self, eval_data, is_test=False, idx=0): r"""Evaluate the model based on the eval data. Returns: dict: eval result, key is the eval metric and value in the corresponding metric value """ self.model.eval() # batch full users batch_matrix_list = [] for batch_idx, batched_data in enumerate(eval_data): # predict: interaction without item ids scores = self.model.full_sort_predict(batched_data) masked_items = batched_data[1] # mask out pos items scores[masked_items[0], masked_items[1]] = -1e10 # rank and get top-k _, topk_index = torch.topk(scores, max(self.config['topk']), dim=-1) # nusers x topk batch_matrix_list.append(topk_index) return self.evaluator.evaluate(batch_matrix_list, eval_data, is_test=is_test, idx=idx) def plot_train_loss(self, show=True, save_path=None): r"""Plot the train loss in each epoch Args: show (bool, optional): whether to show this figure, default: True save_path (str, optional): the data path to save the figure, default: None. If it's None, it will not be saved. 
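Example (hypothetical output path): trainer.plot_train_loss(show=False, save_path='./saved/train_loss.png'). Note that when show and save_path are both set, savefig runs only after show() returns, and some matplotlib backends have cleared the figure by then, so prefer show=False when saving.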
""" epochs = list(self.train_loss_dict.keys()) epochs.sort() values = [float(self.train_loss_dict[epoch]) for epoch in epochs] plt.plot(epochs, values) plt.xticks(epochs) plt.xlabel('Epoch') plt.ylabel('Loss') if show: plt.show() if save_path: plt.savefig(save_path) ================================================ FILE: src/configs/dataset/baby.yaml ================================================ # Common Features USER_ID_FIELD: userID ITEM_ID_FIELD: itemID #RATING_FIELD: rating TIME_FIELD: timestamp filter_out_cod_start_users: True inter_file_name: 'baby.inter' # name of features vision_feature_file: 'image_feat.npy' text_feature_file: 'text_feat.npy' user_graph_dict_file: 'user_graph_dict.npy' field_separator: "\t" ================================================ FILE: src/configs/dataset/clothing.yaml ================================================ # Common Features USER_ID_FIELD: userID ITEM_ID_FIELD: itemID #RATING_FIELD: rating TIME_FIELD: timestamp filter_out_cod_start_users: True inter_file_name: 'clothing.inter' # name of features vision_feature_file: 'image_feat.npy' text_feature_file: 'text_feat.npy' user_graph_dict_file: 'user_graph_dict.npy' field_separator: "\t" ================================================ FILE: src/configs/dataset/elec.yaml ================================================ # Common Features USER_ID_FIELD: userID ITEM_ID_FIELD: itemID #RATING_FIELD: rating TIME_FIELD: timestamp filter_out_cod_start_users: True inter_file_name: 'elec.inter' # name of features vision_feature_file: 'image_feat.npy' text_feature_file: 'text_feat.npy' user_graph_dict_file: 'user_graph_dict.npy' field_separator: "\t" ================================================ FILE: src/configs/dataset/microlens.yaml ================================================ # Common Features USER_ID_FIELD: userID ITEM_ID_FIELD: itemID #RATING_FIELD: rating TIME_FIELD: timestamp filter_out_cod_start_users: True inter_file_name: 'microlens.inter' # name of features vision_feature_file: 'image_feat.npy' text_feature_file: 'text_feat.npy' user_graph_dict_file: 'user_graph_dict.npy' field_separator: "\t" ================================================ FILE: src/configs/dataset/sports.yaml ================================================ # Common Features USER_ID_FIELD: userID ITEM_ID_FIELD: itemID #RATING_FIELD: rating TIME_FIELD: timestamp filter_out_cod_start_users: True inter_file_name: 'sports.inter' # name of features vision_feature_file: 'image_feat.npy' text_feature_file: 'text_feat.npy' user_graph_dict_file: 'user_graph_dict.npy' field_separator: "\t" ================================================ FILE: src/configs/mg.yaml ================================================ alpha1: [1.0] alpha2: [0.1, 0.2, 0.3] beta: [3] hyper_parameters: ["alpha1", "alpha2", "beta"] ================================================ FILE: src/configs/model/BM3.yaml ================================================ embedding_size: 64 feat_embed_dim: 64 n_layers: [1, 2] dropout: [0.3, 0.5] reg_weight: [0.1, 0.01] cl_weight: 2.0 use_neg_sampling: False hyper_parameters: ["n_layers", "reg_weight", "dropout"] ================================================ FILE: src/configs/model/BPR.yaml ================================================ embedding_size: 64 is_multimodal_model: False reg_weight: [2.0, 1.0, 1e-01, 1e-02, 1e-03, 1e-04, 1e-05] hyper_parameters: ["reg_weight"] ================================================ FILE: src/configs/model/DAMRS.yaml ================================================ embedding_size: 
64 feat_embed_dim: 64 weight_size: [64, 64] kl_weight: [1] # [10, 0.1, 0.01] # [1] # [0, 0.1, 1e-02, 1e-04, 1e-03] neighbor_weight: [0.001] # [ 1, 0.1, 0.01, 0.001, 0.0001, 0] # [0.001] # [1, 0.1, 0.001, 0.0001] n_mm_layers: [1] # [1, 2] n_ui_layers: [2] # [1, 2, 3] knn_k: 10 # [3, 5, 10, 15, 20] learning_rate: [0.001] # , 0.0005, 0.0001] item_graph_dict_file: 'item_graph_dict_2.npy' hyper_parameters: ["n_ui_layers", "neighbor_weight", "kl_weight", "n_mm_layers", "learning_rate"] ================================================ FILE: src/configs/model/DRAGON.yaml ================================================ embedding_size: 64 feat_embed_dim: 64 n_mm_layers: 1 n_layers: 2 knn_k: 10 mm_image_weight: 0.1 aggr_mode: ['add'] learning_rate: [0.1, 0.01, 0.001, 0.0001, 0.00001] reg_weight: [0.1, 0.01, 0.001, 0.0001, 0.00001] hyper_parameters: ["aggr_mode", "reg_weight", "learning_rate"] ================================================ FILE: src/configs/model/DualGNN.yaml ================================================ embedding_size: 64 n_layers: 2 aggr_mode: ['add'] reg_weight: [0.1, 0.01, 0.001, 0.0001, 0.00001] learning_rate: [0.1, 0.01, 0.001, 0.0001, 0.00001] hyper_parameters: ["aggr_mode", "learning_rate", "reg_weight"] ================================================ FILE: src/configs/model/FREEDOM.yaml ================================================ embedding_size: 64 feat_embed_dim: 64 weight_size: [64, 64] lambda_coeff: 0.9 reg_weight: [0.0, 1e-05, 1e-04, 1e-03] n_mm_layers: 1 n_ui_layers: 2 knn_k: 10 mm_image_weight: 0.1 dropout: [0.8, 0.9] hyper_parameters: ["dropout", "reg_weight"] ================================================ FILE: src/configs/model/GRCN.yaml ================================================ embedding_size: 64 latent_embedding: 64 n_layers: 3 reg_weight: [0.1, 0.01, 0.001, 0.0001, 0.00001] learning_rate: [1, 0.1, 0.01, 0.001, 0.0001] hyper_parameters: ["reg_weight", "learning_rate"] ================================================ FILE: src/configs/model/ItemKNNCBF.yaml ================================================ knn_k: [10] shrink: [10] req_training: False epochs: 1 hyper_parameters: ['shrink', 'knn_k'] ================================================ FILE: src/configs/model/LATTICE.yaml ================================================ embedding_size: 64 feat_embed_dim: 64 weight_size: [64, 64] learning_rate_scheduler: [0.96, 50] lambda_coeff: 0.9 reg_weight: [0.0, 1e-05, 1e-04, 1e-03] cf_model: lightgcn mess_dropout: [0.1, 0.1] n_layers: 1 knn_k: 10 learning_rate: [0.0001, 0.0005, 0.001, 0.005] hyper_parameters: ["reg_weight", "learning_rate"] ================================================ FILE: src/configs/model/LGMRec.yaml ================================================ embedding_size: 64 feat_embed_dim: 64 cf_model: lightgcn n_ui_layers: [2] n_mm_layers: [2] #baby n_hyper_layer: [1] hyper_num: [4] keep_rate: [0.5] alpha: [0.3] # #sports # n_hyper_layer: [1] # hyper_num: [4] # keep_rate: [0.4] # alpha: [0.6]s # #clothing # n_hyper_layer: [2] # hyper_num: [64] # keep_rate: [0.2] # alpha: [0.2] cl_weight : [1e-04] reg_weight: [1e-06] hyper_parameters: ["n_ui_layers", "n_mm_layers", "n_hyper_layer", "hyper_num", "keep_rate", "alpha", "cl_weight", "reg_weight"] ================================================ FILE: src/configs/model/LayerGCN.yaml ================================================ embedding_size: 64 n_layers: [4] reg_weight: [1e-02, 1e-03, 1e-04, 1e-05] dropout: [0.0, 0.1, 0.2] hyper_parameters: ["n_layers", "dropout", "reg_weight"] 
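A note on the model configs in this directory: every key named in a file's hyper_parameters list holds candidate values, and the framework trains once per element of the Cartesian product of those lists. A minimal sketch of that expansion, assuming a flat dict-like config (the actual logic lives in src/utils/configurator.py and src/utils/quick_start.py and may differ in detail):

    import itertools

    def iter_hyper_combos(config):
        # keys named under 'hyper_parameters' map to lists of candidate values
        names = config['hyper_parameters']
        grids = [config[name] for name in names]
        for combo in itertools.product(*grids):
            # one concrete hyper-parameter assignment per training run
            yield dict(zip(names, combo))

For LayerGCN.yaml above this gives 1 (n_layers) x 3 (dropout) x 4 (reg_weight) = 12 runs.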
================================================ FILE: src/configs/model/LightGCN.yaml ================================================ embedding_size: 64 is_multimodal_model: False n_layers: [1, 2, 3, 4] reg_weight: [1e-02, 1e-03, 1e-04, 1e-05, 1e-06] hyper_parameters: ["n_layers", "reg_weight"] ================================================ FILE: src/configs/model/MGCN.yaml ================================================ embedding_size: 64 n_ui_layers: 2 n_layers: 1 learning_rate_scheduler: [0.96, 50] lambda_coeff: 0.9 reg_weight: 1e-04 knn_k: 10 learning_rate: 0.001 cl_loss: [0.001,0.01,0.1] hyper_parameters: ["cl_loss"] ================================================ FILE: src/configs/model/MMGCN.yaml ================================================ embedding_size: 64 n_layers: 2 reg_weight: [0, 0.00001, 0.0001, 0.001, 0.01, 0.1] learning_rate: [0.0001, 0.0005, 0.001, 0.005, 0.01] hyper_parameters: ["reg_weight", "learning_rate"] ================================================ FILE: src/configs/model/MVGAE.yaml ================================================ embedding_size: 64 n_layers: 1 #reg_weight: [0.1, 0.01, 0.001, 0.0001, 0.00001] learning_rate: [0.0001, 0.001, 0.01, 0.1] beta: [0.01, 0.1, 1] hyper_parameters: ["learning_rate", "beta"] ================================================ FILE: src/configs/model/PGL.yaml ================================================ embedding_size: 64 feat_embed_dim: 64 weight_size: [64, 64] learning_rate_scheduler: [0.96, 50] lambda_coeff: 0.9 learning_rate: 0.001 reg_weight: [0] n_mm_layers: 1 n_ui_layers: 2 knn_k: 10 mm_image_weight: 0.1 dropout: [0.2] mode: ['local'] hyper_parameters: ["dropout","reg_weight","mode"] ================================================ FILE: src/configs/model/SELFCFED_LGN.yaml ================================================ embedding_size: 64 n_layers: [1, 2] dropout: [0.1, 0.2, 0.5] reg_weight: [1e-01, 1e-02, 1e-03, 1e-04, 1e-05, 0.0] use_neg_sampling: False hyper_parameters: ["n_layers", "dropout", "reg_weight"] ================================================ FILE: src/configs/model/SLMRec.yaml ================================================ recdim: 64 layer_num: 3 reg: [0.0001, 0.001, 0.01, 0.1] ssl_task: 'FAC' learning_rate: [0.0001, 0.001, 0.01, 0.1] weight_decay: 1e-4 ssl_alpha: [0.01, 0.05, 0.1, 0.5, 1.0] ssl_temp: [0.1, 0.2, 0.5, 1.0] dropout_rate: 0.3 mm_fusion_mode: 'concat' temp: 0.2 init: 'xavier' adj_type: 'pre' hyper_parameters: ["learning_rate", "ssl_temp", "ssl_alpha", "reg"] ================================================ FILE: src/configs/model/SMORE.yaml ================================================ embedding_size: 64 n_ui_layers: [3,4] n_layers: 1 learning_rate_scheduler: [0.96, 50] lambda_coeff: 0.9 reg_weight: [1e-5,1e-4] learning_rate: 0.001 cl_loss: 0.01 temperature: 0.2 image_knn_k: [10,15,20,40] text_knn_k: [10,15,20,40] dropout_rate : [0, 0.1] hyper_parameters: ["n_ui_layers", "image_knn_k", "text_knn_k", "reg_weight", "dropout_rate"] ================================================ FILE: src/configs/model/VBPR.yaml ================================================ embedding_size: 64 reg_weight: [2.0, 1.0, 1e-01, 1e-02, 1e-03, 1e-04, 1e-05] hyper_parameters: ["reg_weight"] ================================================ FILE: src/configs/overall.yaml ================================================ # general gpu_id: 0 use_gpu: True seed: [999] # multi-modal raw features data_path: '../data/' inter_splitting_label: 'x_label' filter_out_cod_start_users: True is_multimodal_model: 
True checkpoint_dir: 'saved' save_recommended_topk: True recommend_topk: 'recommend_topk/' embedding_size: 64 weight_decay: 0.0 req_training: True #embedding_size: 3780 # training settings epochs: 1000 stopping_step: 20 train_batch_size: 2048 learner: adam learning_rate: 0.001 learning_rate_scheduler: [1.0, 50] eval_step: 1 training_neg_sample_num: 1 use_neg_sampling: True use_full_sampling: False NEG_PREFIX: neg__ USER_ID_FIELD: user_id:token ITEM_ID_FIELD: item_id:token TIME_FIELD: timestamp:float field_separator: "\t" # evaluation settings metrics: ["Recall", "NDCG", "Precision", "MAP"] topk: [5, 10, 20, 50] valid_metric: Recall@20 eval_batch_size: 4096 # use_raw_features: False max_txt_len: 32 max_img_size: 256 vocab_size: 30522 type_vocab_size: 2 hidden_size: 4 pad_token_id: 0 max_position_embeddings: 512 layer_norm_eps: 1e-12 hidden_dropout_prob: 0.1 end2end: False # iteration parameters hyper_parameters: ["seed"] ================================================ FILE: src/main.py ================================================ # coding: utf-8 # @email: enoche.chow@gmail.com """ Main entry # UPDATED: 2022-Feb-15 ########################## """ import os import argparse from utils.quick_start import quick_start os.environ['NUMEXPR_MAX_THREADS'] = '48' if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model', '-m', type=str, default='SELFCFED_LGN', help='name of models') parser.add_argument('--dataset', '-d', type=str, default='baby', help='name of datasets') config_dict = { 'gpu_id': 0, } args, _ = parser.parse_known_args() quick_start(model=args.model, dataset=args.dataset, config_dict=config_dict, save_model=True) ================================================ FILE: src/models/bm3.py ================================================ # coding: utf-8 # @email: enoche.chow@gmail.com r""" ################################################ paper: Bootstrap Latent Representations for Multi-modal Recommendation https://arxiv.org/abs/2207.05969 """ import os import copy import random import numpy as np import scipy.sparse as sp import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.functional import cosine_similarity from common.abstract_recommender import GeneralRecommender from common.loss import EmbLoss class BM3(GeneralRecommender): def __init__(self, config, dataset): super(BM3, self).__init__(config, dataset) self.embedding_dim = config['embedding_size'] self.feat_embed_dim = config['embedding_size'] self.n_layers = config['n_layers'] self.reg_weight = config['reg_weight'] self.cl_weight = config['cl_weight'] self.dropout = config['dropout'] self.n_nodes = self.n_users + self.n_items # load dataset info self.norm_adj = self.get_norm_adj_mat(dataset.inter_matrix(form='coo').astype(np.float32)).to(self.device) self.user_embedding = nn.Embedding(self.n_users, self.embedding_dim) self.item_id_embedding = nn.Embedding(self.n_items, self.embedding_dim) nn.init.xavier_uniform_(self.user_embedding.weight) nn.init.xavier_uniform_(self.item_id_embedding.weight) self.predictor = nn.Linear(self.embedding_dim, self.embedding_dim) self.reg_loss = EmbLoss() nn.init.xavier_normal_(self.predictor.weight) if self.v_feat is not None: self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=False) self.image_trs = nn.Linear(self.v_feat.shape[1], self.feat_embed_dim) nn.init.xavier_normal_(self.image_trs.weight) if self.t_feat is not None: self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=False) self.text_trs = 
nn.Linear(self.t_feat.shape[1], self.feat_embed_dim) nn.init.xavier_normal_(self.text_trs.weight) def get_norm_adj_mat(self, interaction_matrix): A = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32) inter_M = interaction_matrix inter_M_t = interaction_matrix.transpose() data_dict = dict(zip(zip(inter_M.row, inter_M.col + self.n_users), [1] * inter_M.nnz)) data_dict.update(dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col), [1] * inter_M_t.nnz))) A._update(data_dict) # norm adj matrix sumArr = (A > 0).sum(axis=1) # add epsilon to avoid Devide by zero Warning diag = np.array(sumArr.flatten())[0] + 1e-7 diag = np.power(diag, -0.5) D = sp.diags(diag) L = D * A * D # covert norm_adj matrix to tensor L = sp.coo_matrix(L) row = L.row col = L.col i = torch.LongTensor(np.array([row, col])) data = torch.FloatTensor(L.data) return torch.sparse.FloatTensor(i, data, torch.Size((self.n_nodes, self.n_nodes))) def forward(self): h = self.item_id_embedding.weight ego_embeddings = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0) all_embeddings = [ego_embeddings] for i in range(self.n_layers): ego_embeddings = torch.sparse.mm(self.norm_adj, ego_embeddings) all_embeddings += [ego_embeddings] all_embeddings = torch.stack(all_embeddings, dim=1) all_embeddings = all_embeddings.mean(dim=1, keepdim=False) u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0) return u_g_embeddings, i_g_embeddings + h def calculate_loss(self, interactions): # online network u_online_ori, i_online_ori = self.forward() t_feat_online, v_feat_online = None, None if self.t_feat is not None: t_feat_online = self.text_trs(self.text_embedding.weight) if self.v_feat is not None: v_feat_online = self.image_trs(self.image_embedding.weight) with torch.no_grad(): u_target, i_target = u_online_ori.clone(), i_online_ori.clone() u_target.detach() i_target.detach() u_target = F.dropout(u_target, self.dropout) i_target = F.dropout(i_target, self.dropout) if self.t_feat is not None: t_feat_target = t_feat_online.clone() t_feat_target = F.dropout(t_feat_target, self.dropout) if self.v_feat is not None: v_feat_target = v_feat_online.clone() v_feat_target = F.dropout(v_feat_target, self.dropout) u_online, i_online = self.predictor(u_online_ori), self.predictor(i_online_ori) users, items = interactions[0], interactions[1] u_online = u_online[users, :] i_online = i_online[items, :] u_target = u_target[users, :] i_target = i_target[items, :] loss_t, loss_v, loss_tv, loss_vt = 0.0, 0.0, 0.0, 0.0 if self.t_feat is not None: t_feat_online = self.predictor(t_feat_online) t_feat_online = t_feat_online[items, :] t_feat_target = t_feat_target[items, :] loss_t = 1 - cosine_similarity(t_feat_online, i_target.detach(), dim=-1).mean() loss_tv = 1 - cosine_similarity(t_feat_online, t_feat_target.detach(), dim=-1).mean() if self.v_feat is not None: v_feat_online = self.predictor(v_feat_online) v_feat_online = v_feat_online[items, :] v_feat_target = v_feat_target[items, :] loss_v = 1 - cosine_similarity(v_feat_online, i_target.detach(), dim=-1).mean() loss_vt = 1 - cosine_similarity(v_feat_online, v_feat_target.detach(), dim=-1).mean() loss_ui = 1 - cosine_similarity(u_online, i_target.detach(), dim=-1).mean() loss_iu = 1 - cosine_similarity(i_online, u_target.detach(), dim=-1).mean() return (loss_ui + loss_iu).mean() + self.reg_weight * self.reg_loss(u_online_ori, i_online_ori) + \ self.cl_weight * (loss_t + loss_v + loss_tv + loss_vt).mean() def 
full_sort_predict(self, interaction): user = interaction[0] u_online, i_online = self.forward() u_online, i_online = self.predictor(u_online), self.predictor(i_online) score_mat_ui = torch.matmul(u_online[user], i_online.transpose(0, 1)) return score_mat_ui ================================================ FILE: src/models/bpr.py ================================================ # coding: utf-8 # @email: enoche.chow@gmail.com r""" BPR, with only the u-i graph ################################################ Reference: Steffen Rendle et al. "BPR: Bayesian Personalized Ranking from Implicit Feedback." in UAI 2009. """ import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from common.abstract_recommender import GeneralRecommender from common.loss import BPRLoss, EmbLoss from common.init import xavier_normal_initialization class BPR(GeneralRecommender): r"""BPR is a basic matrix factorization model that is trained in a pairwise way. """ def __init__(self, config, dataset): super(BPR, self).__init__(config, dataset) # load parameters info self.embedding_size = config['embedding_size'] self.reg_weight = config['reg_weight'] # float32 type: the weight of the l2 regularization term # define layers and loss self.user_embedding = nn.Embedding(self.n_users, self.embedding_size) self.item_embedding = nn.Embedding(self.n_items, self.embedding_size) self.loss = BPRLoss() self.reg_loss = EmbLoss() # parameters initialization self.apply(xavier_normal_initialization) def get_user_embedding(self, user): r""" Get a batch of user embedding tensors according to the input users' ids. Args: user (torch.LongTensor): The input tensor that contains users' ids, shape: [batch_size, ] Returns: torch.FloatTensor: The embedding tensor of a batch of users, shape: [batch_size, embedding_size] """ return self.user_embedding(user) def get_item_embedding(self, item): r""" Get a batch of item embedding tensors according to the input items' ids.
Args: item (torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ] Returns: torch.FloatTensor: The embedding tensor of a batch of item, shape: [batch_size, embedding_size] """ return self.item_embedding(item) def forward(self, dropout=0.0): user_e = F.dropout(self.user_embedding.weight, dropout) item_e = F.dropout(self.item_embedding.weight, dropout) return user_e, item_e def calculate_loss(self, interaction): """ loss on one batch :param interaction: batch data format: tensor(3, batch_size) [0]: user list; [1]: positive items; [2]: negative items :return: """ user = interaction[0] pos_item = interaction[1] neg_item = interaction[2] user_embeddings, item_embeddings = self.forward() user_e = user_embeddings[user, :] pos_e = item_embeddings[pos_item, :] neg_e = self.get_item_embedding(neg_item) pos_item_score, neg_item_score = torch.mul(user_e, pos_e).sum(dim=1), torch.mul(user_e, neg_e).sum(dim=1) mf_loss = self.loss(pos_item_score, neg_item_score) reg_loss = self.reg_loss(user_e, pos_e, neg_e) loss = mf_loss + self.reg_weight * reg_loss return loss def full_sort_predict(self, interaction): user = interaction[0] user_e = self.get_user_embedding(user) all_item_e = self.item_embedding.weight score = torch.matmul(user_e, all_item_e.transpose(0, 1)) # n_users * n_items return score ================================================ FILE: src/models/damrs.py ================================================ # coding: utf-8 import os import random import numpy as np import scipy.sparse as sp import torch import torch.nn as nn import torch.nn.functional as F from common.abstract_recommender import GeneralRecommender class DAMRS(GeneralRecommender): def __init__(self, config, dataset): super(DAMRS, self).__init__(config, dataset) self.embedding_dim = config['embedding_size'] self.lambda_coeff = config['lambda_coeff'] self.cf_model = config['cf_model'] self.knn_k = config['knn_k'] self.n_layers = config['n_mm_layers'] self.n_ui_layers = config['n_ui_layers'] self.reg_weight = config['reg_weight'] self.kl_weight = config['kl_weight'] self.neighbor_weight = config['neighbor_weight'] self.build_item_graph = True self.n_nodes = self.n_users + self.n_items # load dataset info self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32) self.norm_adj = self.get_norm_adj_mat().to(self.device) self.user_embedding = nn.Embedding(self.n_users, self.embedding_dim) self.item_id_embedding = nn.Embedding(self.n_items, self.embedding_dim) nn.init.xavier_uniform_(self.user_embedding.weight) nn.init.xavier_uniform_(self.item_id_embedding.weight) if self.v_feat is not None: self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=True) self.image_trs = nn.Linear(self.v_feat.shape[1], self.embedding_dim) if self.t_feat is not None: self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=True) self.text_trs = nn.Linear(self.t_feat.shape[1], self.embedding_dim) self.image_adj, self.text_adj = self.get_knn_adj_mat(self.image_embedding.weight.detach(), self.text_embedding.weight.detach()) dataset_path = os.path.abspath(config['data_path'] + config['dataset']) self.item_graph_dict = np.load(os.path.join(dataset_path, config['item_graph_dict_file']), allow_pickle=True).item() __, self.session_adj = self.get_session_adj() def get_knn_adj_mat(self, v_embeddings, t_embeddings): v_context_norm = v_embeddings.div(torch.norm(v_embeddings, p=2, dim=-1, keepdim=True)) v_sim = torch.mm(v_context_norm, v_context_norm.transpose(1, 0)) t_context_norm = 
t_embeddings.div(torch.norm(t_embeddings, p=2, dim=-1, keepdim=True)) t_sim = torch.mm(t_context_norm, t_context_norm.transpose(1, 0)) mask_v = v_sim < v_sim.mean() mask_t = t_sim < t_sim.mean() t_sim[mask_v] = 0 v_sim[mask_t] = 0 t_sim[mask_t] = 0 v_sim[mask_v] = 0 index_x = [] index_v = [] index_t = [] all_items = np.arange(self.n_items).tolist() def _random(): rd_id = random.sample(all_items, 9) # [0] return rd_id for i in range(self.n_items): item_num = len(torch.nonzero(t_sim[i])) if item_num <= self.knn_k: _, v_knn_ind = torch.topk(v_sim[i], item_num) _, t_knn_ind = torch.topk(t_sim[i], item_num) else: _, v_knn_ind = torch.topk(v_sim[i], self.knn_k) _, t_knn_ind = torch.topk(t_sim[i], self.knn_k) index_x.append(torch.ones_like(v_knn_ind) * i) index_v.append(v_knn_ind) index_t.append(t_knn_ind) index_x = torch.cat(index_x, dim=0).cuda() index_v = torch.cat(index_v, dim=0).cuda() index_t = torch.cat(index_t, dim=0).cuda() adj_size = (self.n_items, self.n_items) del v_sim, t_sim v_indices = torch.stack((torch.flatten(index_x), torch.flatten(index_v)), 0) t_indices = torch.stack((torch.flatten(index_x), torch.flatten(index_t)), 0) # norm return self.compute_normalized_laplacian(v_indices, adj_size), self.compute_normalized_laplacian(t_indices, adj_size) def compute_normalized_laplacian(self, indices, adj_size): adj = torch.sparse.FloatTensor(indices, torch.ones_like(indices[0]), adj_size) row_sum = 1e-7 + torch.sparse.sum(adj, -1).to_dense() r_inv_sqrt = torch.pow(row_sum, -0.5) rows_inv_sqrt = r_inv_sqrt[indices[0]] cols_inv_sqrt = r_inv_sqrt[indices[1]] values = rows_inv_sqrt * cols_inv_sqrt return torch.sparse.FloatTensor(indices, values, adj_size) def get_session_adj(self): index_x = [] index_y = [] values = [] for i in range(self.n_items): index_x.append(i) index_y.append(i) values.append(1) if i in self.item_graph_dict.keys(): item_graph_sample = self.item_graph_dict[i][0] item_graph_weight = self.item_graph_dict[i][1] for j in range(len(item_graph_sample)): index_x.append(i) index_y.append(item_graph_sample[j]) values.append(item_graph_weight[j]) index_x = torch.tensor(index_x, dtype=torch.long) index_y = torch.tensor(index_y, dtype=torch.long) indices = torch.stack((index_x, index_y), 0).to(self.device) # norm return indices, self.compute_normalized_laplacian(indices, (self.n_items, self.n_items)) def label_prediction(self, emb, aug_emb): n_emb = F.normalize(emb, dim=1) n_aug_emb = F.normalize(aug_emb, dim=1) prob = torch.mm(n_emb, n_aug_emb.transpose(0, 1)) prob = F.softmax(prob, dim=1) del n_emb, n_aug_emb return prob def generate_pesudo_labels(self, prob1, prob2, prob3): positive = prob1 + prob2 + prob3 + prob3 _, mm_pos_ind = torch.topk(positive, 10, dim=-1) prob = prob3.clone() prob.scatter_(1, mm_pos_ind, 0) _, single_pos_ind = torch.topk(prob, 10, dim=-1) return mm_pos_ind, single_pos_ind def neighbor_discrimination(self, mm_positive, s_positive, emb, aug_emb, temperature=0.2): def score(x1, x2): return torch.sum(torch.mul(x1, x2), dim=2) n_aug_emb = F.normalize(aug_emb, dim=1) n_emb = F.normalize(emb, dim=1) mm_pos_emb = n_aug_emb[mm_positive] s_pos_emb = n_aug_emb[s_positive] emb2 = torch.reshape(n_emb, [-1, 1, self.embedding_dim]) emb2 = torch.tile(emb2, [1, 10, 1]) mm_pos_score = score(emb2, mm_pos_emb) s_pos_score = score(emb2, s_pos_emb) ttl_score = torch.matmul(n_emb, n_aug_emb.transpose(0, 1)) mm_pos_score = torch.sum(torch.exp(mm_pos_score / temperature), dim=1) s_pos_score = torch.sum(torch.exp(s_pos_score / temperature), dim=1) ttl_score = torch.exp(ttl_score / 
temperature).sum(dim=1) # 1 cl_loss = - torch.log(mm_pos_score / (ttl_score) + 10e-10) - torch.log( s_pos_score / (ttl_score - mm_pos_score) + 10e-10) return torch.mean(cl_loss) def KL(self, p1, p2): return p1 * torch.log(p1) - p1 * torch.log(p2) + \ (1 - p1) * torch.log(1 - p1) - (1 - p1) * torch.log(1 - p2) def get_norm_adj_mat(self): A = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32) inter_M = self.interaction_matrix inter_M_t = self.interaction_matrix.transpose() data_dict = dict(zip(zip(inter_M.row, inter_M.col + self.n_users), [1] * inter_M.nnz)) data_dict.update(dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col), [1] * inter_M_t.nnz))) A._update(data_dict) # norm adj matrix sumArr = (A > 0).sum(axis=1) # add epsilon to avoid Devide by zero Warning diag = np.array(sumArr.flatten())[0] + 1e-7 diag = np.power(diag, -0.5) D = sp.diags(diag) L = D * A * D # covert norm_adj matrix to tensor L = sp.coo_matrix(L) row = L.row col = L.col i = torch.LongTensor(np.array([row, col])) data = torch.FloatTensor(L.data) return torch.sparse.FloatTensor(i, data, torch.Size((self.n_nodes, self.n_nodes))) def forward(self): ego_embeddings = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0) all_embeddings = [ego_embeddings] for i in range(self.n_ui_layers): side_embeddings = torch.sparse.mm(self.norm_adj, ego_embeddings) ego_embeddings = side_embeddings all_embeddings += [ego_embeddings] all_embeddings = torch.stack(all_embeddings, dim=1) all_embeddings = all_embeddings.mean(dim=1, keepdim=False) u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0) del ego_embeddings, side_embeddings # text emb h_t = self.item_id_embedding.weight.clone() for i in range(self.n_layers): h_t = torch.sparse.mm(self.text_adj, h_t) # image emb h_v = self.item_id_embedding.weight.clone() for i in range(self.n_layers): h_v = torch.sparse.mm(self.image_adj, h_v) # session emb h_s = self.item_id_embedding.weight.clone() for i in range(self.n_layers): h_s = torch.sparse.mm(self.session_adj, h_s) return u_g_embeddings, i_g_embeddings, h_t, h_v, h_s def calculate_loss(self, interaction): users = interaction[0] pos_items = interaction[1] neg_items = interaction[2] user_embeddings, item_embeddings, h_t, h_v, h_s = self.forward() self.build_item_graph = False u_idx = torch.unique(users, return_inverse=True, sorted=False) i_idx = torch.unique(torch.cat((pos_items, neg_items)), return_inverse=True, sorted=False) u_id = u_idx[0] i_id = i_idx[0] # text label_prediction_t = self.label_prediction(h_t[i_id], h_t) # visual label_prediction_v = self.label_prediction(h_v[i_id], h_v) # session label_prediction_s = self.label_prediction(h_s[i_id], h_s) mm_postive_s, s_postive_s = self.generate_pesudo_labels(label_prediction_t, label_prediction_v, label_prediction_s) neighbor_dis_loss_1 = self.neighbor_discrimination(mm_postive_s, s_postive_s, h_s[i_id], h_s) mm_postive_v, s_postive_v = self.generate_pesudo_labels(label_prediction_t, label_prediction_s, label_prediction_v) neighbor_dis_loss_2 = self.neighbor_discrimination(mm_postive_v, s_postive_v, h_v[i_id], h_v) mm_postive_t, s_postive_t = self.generate_pesudo_labels(label_prediction_v, label_prediction_s, label_prediction_t) neighbor_dis_loss_3 = self.neighbor_discrimination(mm_postive_t, s_postive_t, h_t[i_id], h_t) neighbor_dis_loss = (neighbor_dis_loss_1 + neighbor_dis_loss_2 + neighbor_dis_loss_3) / 3.0 n_u_g_embeddings = user_embeddings[u_id] it_embeddings = (h_t + 
h_s + h_v) / 3.0 p_g = F.sigmoid(torch.matmul(n_u_g_embeddings, F.normalize(item_embeddings[i_id], dim=-1).transpose(0, 1))) p_t = F.sigmoid(torch.matmul(n_u_g_embeddings, F.normalize(it_embeddings[i_id], dim=-1).transpose(0, 1))) KL_loss = torch.mean(self.KL(p_g, p_t) + self.KL(p_t, p_g)) p_weight, n_weight = self.get_weight_modal(users, pos_items, neg_items, user_embeddings, h_t, h_v, h_s) u_g_embeddings = user_embeddings[users] ia_embeddings = item_embeddings + (h_t + h_v + h_s) / 3.0 pos_i_g_embeddings = ia_embeddings[pos_items] neg_i_g_embeddings = ia_embeddings[neg_items] batch_mf_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings, p_weight, n_weight) return batch_mf_loss + self.neighbor_weight * (neighbor_dis_loss) + KL_loss * self.kl_weight def full_sort_predict(self, interaction): user = interaction[0] user_embeddings, item_embeddings, h_t, h_v, h_s = self.forward() user_e = user_embeddings[user, :] i_embedding = (h_v + h_t + h_s) / 3.0 all_item_e = item_embeddings + i_embedding score = torch.matmul(user_e, all_item_e.transpose(0, 1)) return score def get_weight_modal(self, users, pos_items, neg_items, user_embeddings, h_t, h_v, h_s): u_g_embeddings = user_embeddings[users] p_t = torch.sum(torch.mul(u_g_embeddings, F.normalize(h_t[pos_items], dim=-1)), dim=1) p_v = torch.sum(torch.mul(u_g_embeddings, F.normalize(h_s[pos_items], dim=-1)), dim=1) p_s = torch.sum(torch.mul(u_g_embeddings, F.normalize(h_v[pos_items], dim=-1)), dim=1) n_t = torch.sum(torch.mul(u_g_embeddings, F.normalize(h_t[neg_items], dim=-1)), dim=1) n_v = torch.sum(torch.mul(u_g_embeddings, F.normalize(h_s[neg_items], dim=-1)), dim=1) n_s = torch.sum(torch.mul(u_g_embeddings, F.normalize(h_v[neg_items], dim=-1)), dim=1) p_tensor = F.sigmoid(torch.stack([p_t, p_v, p_s])) p_variance = torch.var(p_tensor, dim=0).data p_mean_value = torch.mean(p_tensor, dim=0).data p_max_value, _ = torch.max(p_tensor, dim=0) n_tensor = F.sigmoid(torch.stack([n_t, n_v, n_s])) n_mean_value = torch.mean(n_tensor).data p_mean_probability = torch.pow(p_mean_value, 1.0).data p_var_probability = torch.pow(torch.exp(-p_variance).data, 2.0) # 0 ~ 1 pos_weight = p_mean_probability * p_var_probability pos_weight = torch.clamp(pos_weight, 0, 1).data mask = torch.zeros_like(p_mean_value) mask[p_mean_value < n_mean_value] = 1 neg_weight_max = torch.pow((p_max_value - n_mean_value.data), 1.0) * mask neg_weight = torch.clamp(neg_weight_max, 0, 1).data return pos_weight, neg_weight def bpr_loss(self, users, pos_items, neg_items, p_weight, n_weight): pos_scores = torch.sum(torch.mul(users, pos_items), dim=1) neg_scores = torch.sum(torch.mul(users, neg_items), dim=1) p_maxi = torch.log(F.sigmoid(pos_scores - neg_scores)) * p_weight n_maxi = torch.log(F.sigmoid(neg_scores - pos_scores)) * n_weight mf_loss = -torch.mean(p_maxi + n_maxi) return mf_loss ================================================ FILE: src/models/dragon.py ================================================ # coding: utf-8 # # the user graph needs to be generated beforehand with the following script # tools/generate-u-u-matrix.py import os import numpy as np import scipy.sparse as sp import torch import torch.nn as nn import torch.nn.functional as F from torch_geometric.nn.conv import MessagePassing from torch_geometric.utils import remove_self_loops, add_self_loops, degree import torch_geometric from common.abstract_recommender import GeneralRecommender from common.loss import BPRLoss, EmbLoss from common.init import
xavier_uniform_initialization class DRAGON(GeneralRecommender): def __init__(self, config, dataset): super(DRAGON, self).__init__(config, dataset) num_user = self.n_users num_item = self.n_items batch_size = config['train_batch_size'] # not used dim_x = config['embedding_size'] self.feat_embed_dim = config['feat_embed_dim'] self.n_layers = config['n_mm_layers'] self.knn_k = config['knn_k'] self.mm_image_weight = config['mm_image_weight'] has_id = True self.batch_size = batch_size self.num_user = num_user self.num_item = num_item self.k = 40 self.aggr_mode = config['aggr_mode'] self.user_aggr_mode = 'softmax' self.num_layer = 1 self.cold_start = 0 self.dataset = dataset # self.construction = 'weighted_max' self.construction = 'cat' self.reg_weight = config['reg_weight'] self.drop_rate = 0.1 self.v_rep = None self.t_rep = None self.v_preference = None self.t_preference = None self.dim_latent = 64 self.dim_feat = 128 self.MLP_v = nn.Linear(self.dim_latent, self.dim_latent, bias=False) self.MLP_t = nn.Linear(self.dim_latent, self.dim_latent, bias=False) self.mm_adj = None dataset_path = os.path.abspath(config['data_path'] + config['dataset']) self.user_graph_dict = np.load(os.path.join(dataset_path, config['user_graph_dict_file']), allow_pickle=True).item() mm_adj_file = os.path.join(dataset_path, 'mm_adj_{}.pt'.format(self.knn_k)) if self.v_feat is not None: self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=False) self.image_trs = nn.Linear(self.v_feat.shape[1], self.feat_embed_dim) if self.t_feat is not None: self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=False) self.text_trs = nn.Linear(self.t_feat.shape[1], self.feat_embed_dim) if os.path.exists(mm_adj_file): self.mm_adj = torch.load(mm_adj_file) else: if self.v_feat is not None: indices, image_adj = self.get_knn_adj_mat(self.image_embedding.weight.detach()) self.mm_adj = image_adj if self.t_feat is not None: indices, text_adj = self.get_knn_adj_mat(self.text_embedding.weight.detach()) self.mm_adj = text_adj if self.v_feat is not None and self.t_feat is not None: self.mm_adj = self.mm_image_weight * image_adj + (1.0 - self.mm_image_weight) * text_adj del text_adj del image_adj torch.save(self.mm_adj, mm_adj_file) # packing interaction in training into edge_index train_interactions = dataset.inter_matrix(form='coo').astype(np.float32) edge_index = self.pack_edge_index(train_interactions) self.edge_index = torch.tensor(edge_index, dtype=torch.long).t().contiguous().to(self.device) self.edge_index = torch.cat((self.edge_index, self.edge_index[[1, 0]]), dim=1) # pdb.set_trace() self.weight_u = nn.Parameter(nn.init.xavier_normal_( torch.tensor(np.random.randn(self.num_user, 2, 1), dtype=torch.float32, requires_grad=True))) self.weight_u.data = F.softmax(self.weight_u, dim=1) self.weight_i = nn.Parameter(nn.init.xavier_normal_( torch.tensor(np.random.randn(self.num_item, 2, 1), dtype=torch.float32, requires_grad=True))) self.weight_i.data = F.softmax(self.weight_i, dim=1) self.item_index = torch.zeros([self.num_item], dtype=torch.long) index = [] for i in range(self.num_item): self.item_index[i] = i index.append(i) self.drop_percent = self.drop_rate self.single_percent = 1 self.double_percent = 0 drop_item = torch.tensor( np.random.choice(self.item_index, int(self.num_item * self.drop_percent), replace=False)) drop_item_single = drop_item[:int(self.single_percent * len(drop_item))] self.dropv_node_idx_single = drop_item_single[:int(len(drop_item_single) * 1 / 3)] self.dropt_node_idx_single = 
drop_item_single[int(len(drop_item_single) * 2 / 3):]
        self.dropv_node_idx = self.dropv_node_idx_single
        self.dropt_node_idx = self.dropt_node_idx_single

        mask_cnt = torch.zeros(self.num_item, dtype=int).tolist()
        for edge in edge_index:
            mask_cnt[edge[1] - self.num_user] += 1
        mask_dropv = []
        mask_dropt = []
        for idx, num in enumerate(mask_cnt):
            temp_false = [False] * num
            temp_true = [True] * num
            # keep only the edges whose item is NOT dropped for the given modality
            if idx in self.dropv_node_idx:
                mask_dropv.extend(temp_false)
            else:
                mask_dropv.extend(temp_true)
            if idx in self.dropt_node_idx:
                mask_dropt.extend(temp_false)
            else:
                mask_dropt.extend(temp_true)

        # sort edges by item id so the per-item masks line up with the edge list
        edge_index = edge_index[np.lexsort(edge_index.T[1, None])]
        edge_index_dropv = edge_index[mask_dropv]
        edge_index_dropt = edge_index[mask_dropt]

        self.edge_index_dropv = torch.tensor(edge_index_dropv).t().contiguous().to(self.device)
        self.edge_index_dropt = torch.tensor(edge_index_dropt).t().contiguous().to(self.device)
        self.edge_index_dropv = torch.cat((self.edge_index_dropv, self.edge_index_dropv[[1, 0]]), dim=1)
        self.edge_index_dropt = torch.cat((self.edge_index_dropt, self.edge_index_dropt[[1, 0]]), dim=1)

        self.MLP_user = nn.Linear(self.dim_latent * 2, self.dim_latent)
        if self.v_feat is not None:
            self.v_drop_ze = torch.zeros(len(self.dropv_node_idx), self.v_feat.size(1)).to(self.device)
            self.v_gcn = GCN(self.dataset, batch_size, num_user, num_item, dim_x, self.aggr_mode,
                             num_layer=self.num_layer, has_id=has_id, dropout=self.drop_rate, dim_latent=64,
                             device=self.device, features=self.v_feat)  # 256)
        if self.t_feat is not None:
            self.t_drop_ze = torch.zeros(len(self.dropt_node_idx), self.t_feat.size(1)).to(self.device)
            self.t_gcn = GCN(self.dataset, batch_size, num_user, num_item, dim_x, self.aggr_mode,
                             num_layer=self.num_layer, has_id=has_id, dropout=self.drop_rate, dim_latent=64,
                             device=self.device, features=self.t_feat)

        self.user_graph = User_Graph_sample(num_user, 'add', self.dim_latent)
        self.result_embed = nn.Parameter(
            nn.init.xavier_normal_(torch.tensor(np.random.randn(num_user + num_item, dim_x)))).to(self.device)

    def get_knn_adj_mat(self, mm_embeddings):
        context_norm = mm_embeddings.div(torch.norm(mm_embeddings, p=2, dim=-1, keepdim=True))
        sim = torch.mm(context_norm, context_norm.transpose(1, 0))
        _, knn_ind = torch.topk(sim, self.knn_k, dim=-1)
        adj_size = sim.size()
        del sim
        # construct sparse adj
        indices0 = torch.arange(knn_ind.shape[0]).to(self.device)
        indices0 = torch.unsqueeze(indices0, 1)
        indices0 = indices0.expand(-1, self.knn_k)
        indices = torch.stack((torch.flatten(indices0), torch.flatten(knn_ind)), 0)
        # norm
        return indices, self.compute_normalized_laplacian(indices, adj_size)

    def compute_normalized_laplacian(self, indices, adj_size):
        adj = torch.sparse.FloatTensor(indices, torch.ones_like(indices[0]), adj_size)
        row_sum = 1e-7 + torch.sparse.sum(adj, -1).to_dense()
        r_inv_sqrt = torch.pow(row_sum, -0.5)
        rows_inv_sqrt = r_inv_sqrt[indices[0]]
        cols_inv_sqrt = r_inv_sqrt[indices[1]]
        values = rows_inv_sqrt * cols_inv_sqrt
        return torch.sparse.FloatTensor(indices, values, adj_size)

    def pre_epoch_processing(self):
        self.epoch_user_graph, self.user_weight_matrix = self.topk_sample(self.k)
        self.user_weight_matrix = self.user_weight_matrix.to(self.device)

    def pack_edge_index(self, inter_mat):
        rows = inter_mat.row
        cols = inter_mat.col + self.n_users
        # ndarray([598918, 2]) for ml-imdb
        return np.column_stack((rows, cols))

    def forward(self, interaction):
        user_nodes, pos_item_nodes, neg_item_nodes = interaction[0], interaction[1], interaction[2]
        pos_item_nodes += self.n_users
        neg_item_nodes += self.n_users
        representation = None
        if self.v_feat is not None:
            self.v_rep, self.v_preference = self.v_gcn(self.edge_index_dropv, self.edge_index, self.v_feat)
            representation = self.v_rep
        if self.t_feat is not None:
            self.t_rep, self.t_preference = self.t_gcn(self.edge_index_dropt, self.edge_index, self.t_feat)
            if representation is None:
                representation = self.t_rep
            else:
                if self.construction == 'cat':
                    representation = torch.cat((self.v_rep, self.t_rep), dim=1)
                else:
                    representation += self.t_rep

        if self.construction == 'weighted_sum':
            if self.v_rep is not None:
                self.v_rep = torch.unsqueeze(self.v_rep, 2)
                user_rep = self.v_rep[:self.num_user]
            if self.t_rep is not None:
                self.t_rep = torch.unsqueeze(self.t_rep, 2)
                user_rep = self.t_rep[:self.num_user]
            if self.v_rep is not None and self.t_rep is not None:
                user_rep = torch.matmul(torch.cat((self.v_rep[:self.num_user], self.t_rep[:self.num_user]), dim=2),
                                        self.weight_u)
            user_rep = torch.squeeze(user_rep)

        if self.construction == 'weighted_max':
            # pdb.set_trace()
            self.v_rep = torch.unsqueeze(self.v_rep, 2)
            self.t_rep = torch.unsqueeze(self.t_rep, 2)
            user_rep = torch.cat((self.v_rep[:self.num_user], self.t_rep[:self.num_user]), dim=2)
            user_rep = self.weight_u.transpose(1, 2) * user_rep
            user_rep = torch.max(user_rep, dim=2).values

        if self.construction == 'cat':
            # pdb.set_trace()
            if self.v_rep is not None:
                user_rep = self.v_rep[:self.num_user]
            if self.t_rep is not None:
                user_rep = self.t_rep[:self.num_user]
            if self.v_rep is not None and self.t_rep is not None:
                self.v_rep = torch.unsqueeze(self.v_rep, 2)
                self.t_rep = torch.unsqueeze(self.t_rep, 2)
                user_rep = torch.cat((self.v_rep[:self.num_user], self.t_rep[:self.num_user]), dim=2)
                user_rep = self.weight_u.transpose(1, 2) * user_rep
                user_rep = torch.cat((user_rep[:, :, 0], user_rep[:, :, 1]), dim=1)

        item_rep = representation[self.num_user:]

        # ############################################ multi-modal information aggregation
        h = item_rep
        for i in range(self.n_layers):
            h = torch.sparse.mm(self.mm_adj, h)
        h_u1 = self.user_graph(user_rep, self.epoch_user_graph, self.user_weight_matrix)
        user_rep = user_rep + h_u1
        item_rep = item_rep + h
        self.result_embed = torch.cat((user_rep, item_rep), dim=0)
        user_tensor = self.result_embed[user_nodes]
        pos_item_tensor = self.result_embed[pos_item_nodes]
        neg_item_tensor = self.result_embed[neg_item_nodes]
        pos_scores = torch.sum(user_tensor * pos_item_tensor, dim=1)
        neg_scores = torch.sum(user_tensor * neg_item_tensor, dim=1)
        return pos_scores, neg_scores

    def calculate_loss(self, interaction):
        user = interaction[0]
        pos_scores, neg_scores = self.forward(interaction)
        loss_value = -torch.mean(torch.log2(torch.sigmoid(pos_scores - neg_scores)))
        reg_embedding_loss_v = (self.v_preference[user] ** 2).mean() if self.v_preference is not None else 0.0
        reg_embedding_loss_t = (self.t_preference[user] ** 2).mean() if self.t_preference is not None else 0.0
        reg_loss = self.reg_weight * (reg_embedding_loss_v + reg_embedding_loss_t)
        if self.construction == 'weighted_sum':
            reg_loss += self.reg_weight * (self.weight_u ** 2).mean()
            reg_loss += self.reg_weight * (self.weight_i ** 2).mean()
        elif self.construction == 'cat':
            reg_loss += self.reg_weight * (self.weight_u ** 2).mean()
        elif self.construction == 'cat_mlp':
            reg_loss += self.reg_weight * (self.MLP_user.weight ** 2).mean()
        return loss_value + reg_loss

    def full_sort_predict(self, interaction):
        user_tensor = self.result_embed[:self.n_users]
        item_tensor = self.result_embed[self.n_users:]
        temp_user_tensor = user_tensor[interaction[0], :]
        score_matrix = torch.matmul(temp_user_tensor, item_tensor.t())
        return score_matrix

    def topk_sample(self, k):
        user_graph_index = []
        count_num = 0
        user_weight_matrix = torch.zeros(len(self.user_graph_dict), k)
        tasike = []
        for i in range(k):
            tasike.append(0)
        for i in range(len(self.user_graph_dict)):
            if len(self.user_graph_dict[i][0]) < k:
                count_num += 1
                if len(self.user_graph_dict[i][0]) == 0:
                    # pdb.set_trace()
                    user_graph_index.append(tasike)
                    continue
                user_graph_sample = self.user_graph_dict[i][0][:k]
                user_graph_weight = self.user_graph_dict[i][1][:k]
                # pad users with fewer than k neighbours by re-sampling existing ones
                while len(user_graph_sample) < k:
                    rand_index = np.random.randint(0, len(user_graph_sample))
                    user_graph_sample.append(user_graph_sample[rand_index])
                    user_graph_weight.append(user_graph_weight[rand_index])
                user_graph_index.append(user_graph_sample)
                if self.user_aggr_mode == 'softmax':
                    user_weight_matrix[i] = F.softmax(torch.tensor(user_graph_weight), dim=0)  # softmax
                if self.user_aggr_mode == 'mean':
                    user_weight_matrix[i] = torch.ones(k) / k  # mean
                continue
            user_graph_sample = self.user_graph_dict[i][0][:k]
            user_graph_weight = self.user_graph_dict[i][1][:k]
            if self.user_aggr_mode == 'softmax':
                user_weight_matrix[i] = F.softmax(torch.tensor(user_graph_weight), dim=0)  # softmax
            if self.user_aggr_mode == 'mean':
                user_weight_matrix[i] = torch.ones(k) / k  # mean
            user_graph_index.append(user_graph_sample)
        # pdb.set_trace()
        return user_graph_index, user_weight_matrix


class User_Graph_sample(torch.nn.Module):
    def __init__(self, num_user, aggr_mode, dim_latent):
        super(User_Graph_sample, self).__init__()
        self.num_user = num_user
        self.dim_latent = dim_latent
        self.aggr_mode = aggr_mode

    def forward(self, features, user_graph, user_matrix):
        index = user_graph
        u_features = features[index]
        user_matrix = user_matrix.unsqueeze(1)
        # pdb.set_trace()
        u_pre = torch.matmul(user_matrix, u_features)
        u_pre = u_pre.squeeze()
        return u_pre


class GCN(torch.nn.Module):
    def __init__(self, datasets, batch_size, num_user, num_item, dim_id, aggr_mode,
                 num_layer, has_id, dropout, dim_latent=None, device=None, features=None):
        super(GCN, self).__init__()
        self.batch_size = batch_size
        self.num_user = num_user
        self.num_item = num_item
        self.datasets = datasets
        self.dim_id = dim_id
        self.dim_feat = features.size(1)
        self.dim_latent = dim_latent
        self.aggr_mode = aggr_mode
        self.num_layer = num_layer
        self.has_id = has_id
        self.dropout = dropout
        self.device = device

        if self.dim_latent:
            self.preference = nn.Parameter(nn.init.xavier_normal_(torch.tensor(
                np.random.randn(num_user, self.dim_latent), dtype=torch.float32, requires_grad=True),
                gain=1).to(self.device))
            self.MLP = nn.Linear(self.dim_feat, 4 * self.dim_latent)
            self.MLP_1 = nn.Linear(4 * self.dim_latent, self.dim_latent)
            self.conv_embed_1 = Base_gcn(self.dim_latent, self.dim_latent, aggr=self.aggr_mode)
        else:
            self.preference = nn.Parameter(nn.init.xavier_normal_(torch.tensor(
                np.random.randn(num_user, self.dim_feat), dtype=torch.float32, requires_grad=True),
                gain=1).to(self.device))
            self.conv_embed_1 = Base_gcn(self.dim_latent, self.dim_latent, aggr=self.aggr_mode)

    def forward(self, edge_index_drop, edge_index, features):
        temp_features = self.MLP_1(F.leaky_relu(self.MLP(features))) if self.dim_latent else features
        x = torch.cat((self.preference, temp_features), dim=0).to(self.device)
        x = F.normalize(x).to(self.device)
        h = self.conv_embed_1(x, edge_index)  # equation 1
        h_1 = self.conv_embed_1(h, edge_index)
        x_hat = h + x + h_1
        return x_hat, self.preference


class Base_gcn(MessagePassing):
    def __init__(self, in_channels, out_channels, normalize=True, bias=True, aggr='add', **kwargs):
        super(Base_gcn, self).__init__(aggr=aggr, **kwargs)
        self.aggr = aggr
        self.in_channels = in_channels
        self.out_channels = out_channels

    def forward(self, x, edge_index, size=None):
        # pdb.set_trace()
        if size is None:
            edge_index, _ = remove_self_loops(edge_index)
            # edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
        x = x.unsqueeze(-1) if x.dim() == 1 else x
        # pdb.set_trace()
        return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)

    def message(self, x_j, edge_index, size):
        if self.aggr == 'add':
            # pdb.set_trace()
            row, col = edge_index
            deg = degree(row, size[0], dtype=x_j.dtype)
            deg_inv_sqrt = deg.pow(-0.5)
            norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
            return norm.view(-1, 1) * x_j
        return x_j

    def update(self, aggr_out):
        return aggr_out

    def __repr__(self):
        return '{}({},{})'.format(self.__class__.__name__, self.in_channels, self.out_channels)
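The get_knn_adj_mat / compute_normalized_laplacian pair above builds a symmetrically normalized k-NN item graph from modal embeddings. The following standalone sketch repeats that construction with toy sizes and random features (illustrative only, not part of the repository):

import torch

n_items, dim, knn_k = 8, 4, 3
emb = torch.randn(n_items, dim)
ctx = emb / emb.norm(p=2, dim=-1, keepdim=True)            # L2-normalize rows
sim = ctx @ ctx.t()                                        # cosine similarity
_, knn_ind = torch.topk(sim, knn_k, dim=-1)                # top-k neighbours per item
rows = torch.arange(n_items).unsqueeze(1).expand(-1, knn_k)
indices = torch.stack((rows.flatten(), knn_ind.flatten()))
ones = torch.ones(indices.shape[1])
adj = torch.sparse_coo_tensor(indices, ones, (n_items, n_items))
row_sum = torch.sparse.sum(adj, -1).to_dense() + 1e-7
r_inv_sqrt = row_sum.pow(-0.5)
values = r_inv_sqrt[indices[0]] * r_inv_sqrt[indices[1]]   # entries of D^-1/2 A D^-1/2
norm_adj = torch.sparse_coo_tensor(indices, values, (n_items, n_items))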

================================================
FILE: src/models/dualgnn.py
================================================
# coding: utf-8
#
"""
DualGNN: Dual Graph Neural Network for Multimedia Recommendation, IEEE Transactions on Multimedia 2021.
"""
import os
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import remove_self_loops, add_self_loops, degree
import torch_geometric

from common.abstract_recommender import GeneralRecommender
from common.loss import BPRLoss, EmbLoss
from common.init import xavier_uniform_initialization


class DualGNN(GeneralRecommender):
    def __init__(self, config, dataset):
        super(DualGNN, self).__init__(config, dataset)
        num_user = self.n_users
        num_item = self.n_items
        batch_size = config['train_batch_size']  # not used
        dim_x = config['embedding_size']
        has_id = True

        self.batch_size = batch_size
        self.num_user = num_user
        self.num_item = num_item
        self.k = 40
        self.aggr_mode = config['aggr_mode']
        self.user_aggr_mode = 'softmax'
        self.num_layer = 1
        self.cold_start = 0
        self.dataset = dataset
        self.construction = 'weighted_sum'
        self.reg_weight = config['reg_weight']
        self.drop_rate = 0.1
        self.v_rep = None
        self.t_rep = None
        self.v_preference = None
        self.t_preference = None
        self.dim_latent = 64
        self.dim_feat = 128
        self.MLP_v = nn.Linear(self.dim_latent, self.dim_latent, bias=False)
        self.MLP_t = nn.Linear(self.dim_latent, self.dim_latent, bias=False)

        dataset_path = os.path.abspath(config['data_path'] + config['dataset'])
        self.user_graph_dict = np.load(os.path.join(dataset_path, config['user_graph_dict_file']),
                                       allow_pickle=True).item()

        # packing interaction in training into edge_index
        train_interactions = dataset.inter_matrix(form='coo').astype(np.float32)
        # edge_index = torch.tensor(self.pack_edge_index(train_interactions), dtype=torch.long)
        edge_index = self.pack_edge_index(train_interactions)
        self.edge_index = torch.tensor(edge_index, dtype=torch.long).t().contiguous().to(self.device)
        self.edge_index = torch.cat((self.edge_index, self.edge_index[[1, 0]]), dim=1)
        # pdb.set_trace()
        self.weight_u = nn.Parameter(nn.init.xavier_normal_(
            torch.tensor(np.random.randn(self.num_user, 2, 1), dtype=torch.float32, requires_grad=True)))
        self.weight_u.data = F.softmax(self.weight_u.data, dim=1)

        self.weight_i = nn.Parameter(nn.init.xavier_normal_(
            torch.tensor(np.random.randn(self.num_item, 2, 1), dtype=torch.float32, requires_grad=True)))
        self.weight_i.data = F.softmax(self.weight_i.data, dim=1)

        self.item_index = torch.zeros([self.num_item], dtype=torch.long)
        index = []
        for i in range(self.num_item):
            self.item_index[i] = i
            index.append(i)
        self.drop_percent = self.drop_rate
        self.single_percent = 1
        self.double_percent = 0
        # pdb.set_trace()
        drop_item = torch.tensor(
            np.random.choice(self.item_index, int(self.num_item * self.drop_percent), replace=False))
        drop_item_single = drop_item[:int(self.single_percent * len(drop_item))]
        # random.shuffle(index)
        # self.item_index = self.item_index[index]

        self.dropv_node_idx_single = drop_item_single[:int(len(drop_item_single) * 1 / 3)]
        self.dropt_node_idx_single = drop_item_single[int(len(drop_item_single) * 2 / 3):]

        self.dropv_node_idx = self.dropv_node_idx_single
        self.dropt_node_idx = self.dropt_node_idx_single
        # pdb.set_trace()
        mask_cnt = torch.zeros(self.num_item, dtype=int).tolist()
        for edge in edge_index:
            mask_cnt[edge[1] - self.num_user] += 1
        mask_dropv = []
        mask_dropt = []
        for idx, num in enumerate(mask_cnt):
            temp_false = [False] * num
            temp_true = [True] * num
            if idx in self.dropv_node_idx:
                mask_dropv.extend(temp_false)
            else:
                mask_dropv.extend(temp_true)
            if idx in self.dropt_node_idx:
                mask_dropt.extend(temp_false)
            else:
                mask_dropt.extend(temp_true)

        edge_index = edge_index[np.lexsort(edge_index.T[1, None])]
        edge_index_dropv = edge_index[mask_dropv]
        edge_index_dropt = edge_index[mask_dropt]

        self.edge_index_dropv = torch.tensor(edge_index_dropv).t().contiguous().to(self.device)
        self.edge_index_dropt = torch.tensor(edge_index_dropt).t().contiguous().to(self.device)
        self.edge_index_dropv = torch.cat((self.edge_index_dropv, self.edge_index_dropv[[1, 0]]), dim=1)
        self.edge_index_dropt = torch.cat((self.edge_index_dropt, self.edge_index_dropt[[1, 0]]), dim=1)

        self.MLP_user = nn.Linear(self.dim_latent * 3, self.dim_latent)
        # self.v_feat = torch.tensor(v_feat, dtype=torch.float).to(self.device)
        # self.t_feat = torch.tensor(t_feat, dtype=torch.float).to(self.device)
        if self.v_feat is not None:
            self.v_drop_ze = torch.zeros(len(self.dropv_node_idx), self.v_feat.size(1)).to(self.device)
            self.v_gcn = GCN(self.dataset, batch_size, num_user, num_item, dim_x, self.aggr_mode,
                             num_layer=self.num_layer, has_id=has_id, dropout=self.drop_rate, dim_latent=64,
                             device=self.device, features=self.v_feat)  # 256)
        if self.t_feat is not None:
            self.t_drop_ze = torch.zeros(len(self.dropt_node_idx), self.t_feat.size(1)).to(self.device)
            self.t_gcn = GCN(self.dataset, batch_size, num_user, num_item, dim_x, self.aggr_mode,
                             num_layer=self.num_layer, has_id=has_id, dropout=self.drop_rate, dim_latent=64,
                             device=self.device, features=self.t_feat)

        self.user_graph = User_Graph_sample(num_user, 'add', self.dim_latent)
        self.result_embed = nn.Parameter(
            nn.init.xavier_normal_(torch.tensor(np.random.randn(num_user + num_item, dim_x)))).to(self.device)

    def pre_epoch_processing(self):
        self.epoch_user_graph, self.user_weight_matrix = self.topk_sample(self.k)
        self.user_weight_matrix = self.user_weight_matrix.to(self.device)

    def pack_edge_index(self, inter_mat):
        rows = inter_mat.row
        cols = inter_mat.col + self.n_users
        # ndarray([598918, 2]) for ml-imdb
        return np.column_stack((rows, cols))

    def forward(self, interaction):
        user_nodes, pos_item_nodes, neg_item_nodes = interaction[0], interaction[1], interaction[2]
        pos_item_nodes += self.n_users
        neg_item_nodes += self.n_users
        representation = None
        if self.v_feat is not None:
            self.v_rep, self.v_preference = self.v_gcn(self.edge_index_dropv, self.edge_index, self.v_feat)
            representation = self.v_rep
        if self.t_feat is not None:
            self.t_rep, self.t_preference = self.t_gcn(self.edge_index_dropt, self.edge_index, self.t_feat)
            if representation is None:
                representation = self.t_rep
            else:
                representation += self.t_rep
        # representation = self.v_rep+self.a_rep+self.t_rep
        # pdb.set_trace()
        if self.construction == 'weighted_sum':
            if self.v_rep is not None:
                self.v_rep = torch.unsqueeze(self.v_rep, 2)
                user_rep = self.v_rep[:self.num_user]
            if self.t_rep is not None:
                self.t_rep = torch.unsqueeze(self.t_rep, 2)
                user_rep = self.t_rep[:self.num_user]
            if self.v_rep is not None and self.t_rep is not None:
                user_rep = torch.matmul(torch.cat((self.v_rep[:self.num_user], self.t_rep[:self.num_user]), dim=2),
                                        self.weight_u)
            user_rep = torch.squeeze(user_rep)
        item_rep = representation[self.num_user:]

        # ############################################ multi-modal information aggregation
        h_u1 = self.user_graph(user_rep, self.epoch_user_graph, self.user_weight_matrix)
        user_rep = user_rep + h_u1
        self.result_embed = torch.cat((user_rep, item_rep), dim=0)
        user_tensor = self.result_embed[user_nodes]
        pos_item_tensor = self.result_embed[pos_item_nodes]
        neg_item_tensor = self.result_embed[neg_item_nodes]
        pos_scores = torch.sum(user_tensor * pos_item_tensor, dim=1)
        neg_scores = torch.sum(user_tensor * neg_item_tensor, dim=1)
        return pos_scores, neg_scores

    def calculate_loss(self, interaction):
        user = interaction[0]
        pos_scores, neg_scores = self.forward(interaction)
        loss_value = -torch.mean(torch.log2(torch.sigmoid(pos_scores - neg_scores)))
        reg_embedding_loss_v = (self.v_preference[user] ** 2).mean() if self.v_preference is not None else 0.0
        # reg_embedding_loss_a = (self.a_preference[user.to(self.device)] ** 2).mean()
        reg_embedding_loss_t = (self.t_preference[user] ** 2).mean() if self.t_preference is not None else 0.0
        reg_loss = self.reg_weight * (reg_embedding_loss_v + reg_embedding_loss_t)
        # reg_loss = self.reg_weight * (reg_embedding_loss_v+reg_embedding_loss_a+reg_embedding_loss_t)
        if self.construction == 'weighted_sum':
            reg_loss += self.reg_weight * (self.weight_u ** 2).mean()
            reg_loss += self.reg_weight * (self.weight_i ** 2).mean()
        elif self.construction == 'cat_mlp':
            reg_loss += self.reg_weight * (self.MLP_user.weight ** 2).mean()
        return loss_value + reg_loss

    def full_sort_predict(self, interaction):
        user_tensor = self.result_embed[:self.n_users]
        item_tensor = self.result_embed[self.n_users:]
        temp_user_tensor = user_tensor[interaction[0], :]
        score_matrix = torch.matmul(temp_user_tensor, item_tensor.t())
        return score_matrix

    def topk_sample(self, k):
        user_graph_index = []
        count_num = 0
        user_weight_matrix = torch.zeros(len(self.user_graph_dict), k)
        tasike = []
        for i in range(k):
            tasike.append(0)
        for i in range(len(self.user_graph_dict)):
            if len(self.user_graph_dict[i][0]) < k:
                count_num += 1
                if len(self.user_graph_dict[i][0]) == 0:
                    # pdb.set_trace()
                    user_graph_index.append(tasike)
                    continue
                user_graph_sample = self.user_graph_dict[i][0][:k]
                user_graph_weight = self.user_graph_dict[i][1][:k]
                while len(user_graph_sample) < k:
                    # pdb.set_trace()
                    rand_index = np.random.randint(0, len(user_graph_sample))
                    user_graph_sample.append(user_graph_sample[rand_index])
                    user_graph_weight.append(user_graph_weight[rand_index])
                user_graph_index.append(user_graph_sample)
                # user_weight_matrix[i] = torch.tensor(user_graph_weight) / sum(user_graph_weight)  # weighted
                if self.user_aggr_mode == 'softmax':
                    user_weight_matrix[i] = F.softmax(torch.tensor(user_graph_weight), dim=0)  # softmax
                if self.user_aggr_mode == 'mean':
                    user_weight_matrix[i] = torch.ones(k) / k  # mean
                # pdb.set_trace()
                continue
            user_graph_sample = self.user_graph_dict[i][0][:k]
            user_graph_weight = self.user_graph_dict[i][1][:k]
            # user_weight_matrix[i] = torch.tensor(user_graph_weight) / sum(user_graph_weight)  # weighted
            if self.user_aggr_mode == 'softmax':
                user_weight_matrix[i] = F.softmax(torch.tensor(user_graph_weight), dim=0)  # softmax
            if self.user_aggr_mode == 'mean':
                # pdb.set_trace()
                user_weight_matrix[i] = torch.ones(k) / k  # mean
            # user_weight_list.append(user_weight)
            user_graph_index.append(user_graph_sample)
        # pdb.set_trace()
        return user_graph_index, user_weight_matrix


class User_Graph_sample(torch.nn.Module):
    def __init__(self, num_user, aggr_mode, dim_latent):
        super(User_Graph_sample, self).__init__()
        self.num_user = num_user
        self.dim_latent = dim_latent
        self.aggr_mode = aggr_mode

    def forward(self, features, user_graph, user_matrix):
        index = user_graph
        u_features = features[index]
        user_matrix = user_matrix.unsqueeze(1)
        # pdb.set_trace()
        u_pre = torch.matmul(user_matrix, u_features)
        u_pre = u_pre.squeeze()
        return u_pre


class GCN(torch.nn.Module):
    def __init__(self, datasets, batch_size, num_user, num_item, dim_id, aggr_mode,
                 num_layer, has_id, dropout, dim_latent=None, device=None, features=None):
        super(GCN, self).__init__()
        self.batch_size = batch_size
        self.num_user = num_user
        self.num_item = num_item
        self.datasets = datasets
        self.dim_id = dim_id
        # if self.datasets == 'tiktok' or self.datasets == 'tiktok_new' or self.datasets == 'cold_tiktok':
        #     self.dim_feat = 128
        # elif self.datasets == 'Movielens' or self.datasets == 'cold_movie' or self.datasets == 'ml-imdb-npy':
        #     self.dim_feat = features.size(1)
        self.dim_feat = features.size(1)
        self.dim_latent = dim_latent
        self.aggr_mode = aggr_mode
        self.num_layer = num_layer
        self.has_id = has_id
        self.dropout = dropout
        self.device = device

        if self.dim_latent:
            self.preference = nn.Parameter(nn.init.xavier_normal_(torch.tensor(
                np.random.randn(num_user, self.dim_latent), dtype=torch.float32, requires_grad=True),
                gain=1).to(self.device))
            self.MLP = nn.Linear(self.dim_feat, 4 * self.dim_latent)
            self.MLP_1 = nn.Linear(4 * self.dim_latent, self.dim_latent)
            self.conv_embed_1 = Base_gcn(self.dim_latent, self.dim_latent, aggr=self.aggr_mode)
        else:
            self.preference = nn.Parameter(nn.init.xavier_normal_(torch.tensor(
                np.random.randn(num_user, self.dim_feat), dtype=torch.float32, requires_grad=True),
                gain=1).to(self.device))
            self.conv_embed_1 = Base_gcn(self.dim_latent, self.dim_latent, aggr=self.aggr_mode)

    def forward(self, edge_index_drop, edge_index, features):
        # pdb.set_trace()
        temp_features = self.MLP_1(F.leaky_relu(self.MLP(features))) if self.dim_latent else features
        # temp_features = F.normalize(temp_features)
        x = torch.cat((self.preference, temp_features), dim=0).to(self.device)
        x = F.normalize(x).to(self.device)
        # pdb.set_trace()
        h = self.conv_embed_1(x, edge_index)  # equation 1
        h_1 = self.conv_embed_1(h, edge_index)
        x_hat = h + x + h_1
        return x_hat, self.preference


class Base_gcn(MessagePassing):
    def __init__(self, in_channels, out_channels, normalize=True, bias=True, aggr='add', **kwargs):
        super(Base_gcn, self).__init__(aggr=aggr, **kwargs)
        self.aggr = aggr
        self.in_channels = in_channels
        self.out_channels = out_channels

    def forward(self, x, edge_index, size=None):
        # pdb.set_trace()
        if size is None:
            edge_index, _ = remove_self_loops(edge_index)
            # edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
        x = x.unsqueeze(-1) if x.dim() == 1 else x
        # pdb.set_trace()
        return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)

    def message(self, x_j, edge_index, size):
        if self.aggr == 'add':
            # pdb.set_trace()
            row, col = edge_index
            deg = degree(row, size[0], dtype=x_j.dtype)
            deg_inv_sqrt = deg.pow(-0.5)
            norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
            return norm.view(-1, 1) * x_j
        return x_j

    def update(self, aggr_out):
        return aggr_out

    def __repr__(self):
        return '{}({},{})'.format(self.__class__.__name__, self.in_channels, self.out_channels)
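topk_sample above pads every user's neighbour list to exactly k entries by re-sampling already-present neighbours, then normalizes the weights. A toy illustration of that padding step with hypothetical neighbour ids and weights (not part of the repository):

import numpy as np
import torch
import torch.nn.functional as F

k = 5
neigh = [2, 7, 9]              # a user with fewer than k co-occurring neighbours
weight = [0.5, 0.3, 0.2]
while len(neigh) < k:          # pad by duplicating a random existing neighbour
    j = np.random.randint(0, len(neigh))
    neigh.append(neigh[j])
    weight.append(weight[j])
w = F.softmax(torch.tensor(weight), dim=0)   # one row of the aggregation matrix ('softmax' mode)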

================================================
FILE: src/models/freedom.py
================================================
# coding: utf-8
# @email: enoche.chow@gmail.com
r"""
FREEDOM: A Tale of Two Graphs: Freezing and Denoising Graph Structures for Multimodal Recommendation
# Update: 01/08/2022
"""
import os
import random
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import torch.nn.functional as F

from common.abstract_recommender import GeneralRecommender
from common.loss import BPRLoss, EmbLoss, L2Loss
from utils.utils import build_sim, compute_normalized_laplacian


class FREEDOM(GeneralRecommender):
    def __init__(self, config, dataset):
        super(FREEDOM, self).__init__(config, dataset)
        self.embedding_dim = config['embedding_size']
        self.feat_embed_dim = config['feat_embed_dim']
        self.knn_k = config['knn_k']
        self.lambda_coeff = config['lambda_coeff']
        self.cf_model = config['cf_model']
        self.n_layers = config['n_mm_layers']
        self.n_ui_layers = config['n_ui_layers']
        self.reg_weight = config['reg_weight']
        self.build_item_graph = True
        self.mm_image_weight = config['mm_image_weight']
        self.dropout = config['dropout']
        self.degree_ratio = config['degree_ratio']

        self.n_nodes = self.n_users + self.n_items

        # load dataset info
        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32)
        self.norm_adj = self.get_norm_adj_mat().to(self.device)
        self.masked_adj, self.mm_adj = None, None
        self.edge_indices, self.edge_values = self.get_edge_info()
        self.edge_indices, self.edge_values = self.edge_indices.to(self.device), self.edge_values.to(self.device)
        self.edge_full_indices = torch.arange(self.edge_values.size(0)).to(self.device)

        self.user_embedding = nn.Embedding(self.n_users, self.embedding_dim)
        self.item_id_embedding = nn.Embedding(self.n_items, self.embedding_dim)
        nn.init.xavier_uniform_(self.user_embedding.weight)
        nn.init.xavier_uniform_(self.item_id_embedding.weight)

        dataset_path = os.path.abspath(config['data_path'] + config['dataset'])
        mm_adj_file = os.path.join(dataset_path,
                                   'mm_adj_freedomdsp_{}_{}.pt'.format(self.knn_k, int(10 * self.mm_image_weight)))

        if self.v_feat is not None:
            self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=False)
            self.image_trs = nn.Linear(self.v_feat.shape[1], self.feat_embed_dim)
        if self.t_feat is not None:
            self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=False)
            self.text_trs = nn.Linear(self.t_feat.shape[1], self.feat_embed_dim)

        if os.path.exists(mm_adj_file):
            self.mm_adj = torch.load(mm_adj_file)
        else:
            if self.v_feat is not None:
                indices, image_adj = self.get_knn_adj_mat(self.image_embedding.weight.detach())
                self.mm_adj = image_adj
            if self.t_feat is not None:
                indices, text_adj = self.get_knn_adj_mat(self.text_embedding.weight.detach())
                self.mm_adj = text_adj
            if self.v_feat is not None and self.t_feat is not None:
                self.mm_adj = self.mm_image_weight * image_adj + (1.0 - self.mm_image_weight) * text_adj
                del text_adj
                del image_adj
            torch.save(self.mm_adj, mm_adj_file)

    def get_knn_adj_mat(self, mm_embeddings):
        context_norm = mm_embeddings.div(torch.norm(mm_embeddings, p=2, dim=-1, keepdim=True))
        sim = torch.mm(context_norm, context_norm.transpose(1, 0))
        _, knn_ind = torch.topk(sim, self.knn_k, dim=-1)
        adj_size = sim.size()
        del sim
        # construct sparse adj
        indices0 = torch.arange(knn_ind.shape[0]).to(self.device)
        indices0 = torch.unsqueeze(indices0, 1)
        indices0 = indices0.expand(-1, self.knn_k)
        indices = torch.stack((torch.flatten(indices0), torch.flatten(knn_ind)), 0)
        # norm
        return indices, self.compute_normalized_laplacian(indices, adj_size)

    def compute_normalized_laplacian(self, indices, adj_size):
        adj = torch.sparse.FloatTensor(indices, torch.ones_like(indices[0]), adj_size)
        row_sum = 1e-7 + torch.sparse.sum(adj, -1).to_dense()
        r_inv_sqrt = torch.pow(row_sum, -0.5)
        rows_inv_sqrt = r_inv_sqrt[indices[0]]
        cols_inv_sqrt = r_inv_sqrt[indices[1]]
        values = rows_inv_sqrt * cols_inv_sqrt
        return torch.sparse.FloatTensor(indices, values, adj_size)

    def get_norm_adj_mat(self):
        A = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32)
        inter_M = self.interaction_matrix
        inter_M_t = self.interaction_matrix.transpose()
        data_dict = dict(zip(zip(inter_M.row, inter_M.col + self.n_users), [1] * inter_M.nnz))
        data_dict.update(dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col), [1] * inter_M_t.nnz)))
        A._update(data_dict)
        # norm adj matrix
        sumArr = (A > 0).sum(axis=1)
        # add epsilon to avoid a divide-by-zero warning
        diag = np.array(sumArr.flatten())[0] + 1e-7
        diag = np.power(diag, -0.5)
        D = sp.diags(diag)
        L = D * A * D
        # convert norm_adj matrix to tensor
        L = sp.coo_matrix(L)
        row = L.row
        col = L.col
        i = torch.LongTensor(np.array([row, col]))
        data = torch.FloatTensor(L.data)
        return torch.sparse.FloatTensor(i, data, torch.Size((self.n_nodes, self.n_nodes)))

    def pre_epoch_processing(self):
        if self.dropout <= .0:
            self.masked_adj = self.norm_adj
            return
        # degree-sensitive edge pruning
        degree_len = int(self.edge_values.size(0) * (1. - self.dropout))
        degree_idx = torch.multinomial(self.edge_values, degree_len)  # random sample
        keep_indices = self.edge_indices[:, degree_idx]
        # norm values
        keep_values = self._normalize_adj_m(keep_indices, torch.Size((self.n_users, self.n_items)))
        all_values = torch.cat((keep_values, keep_values))
        # update keep_indices to users/items+self.n_users
        keep_indices[1] += self.n_users
        all_indices = torch.cat((keep_indices, torch.flip(keep_indices, [0])), 1)
        self.masked_adj = torch.sparse.FloatTensor(all_indices, all_values, self.norm_adj.shape).to(self.device)

    def _normalize_adj_m(self, indices, adj_size):
        adj = torch.sparse.FloatTensor(indices, torch.ones_like(indices[0]), adj_size)
        row_sum = 1e-7 + torch.sparse.sum(adj, -1).to_dense()
        col_sum = 1e-7 + torch.sparse.sum(adj.t(), -1).to_dense()
        r_inv_sqrt = torch.pow(row_sum, -0.5)
        rows_inv_sqrt = r_inv_sqrt[indices[0]]
        c_inv_sqrt = torch.pow(col_sum, -0.5)
        cols_inv_sqrt = c_inv_sqrt[indices[1]]
        values = rows_inv_sqrt * cols_inv_sqrt
        return values

    def get_edge_info(self):
        rows = torch.from_numpy(self.interaction_matrix.row)
        cols = torch.from_numpy(self.interaction_matrix.col)
        edges = torch.stack([rows, cols]).type(torch.LongTensor)
        # edge normalized values
        values = self._normalize_adj_m(edges, torch.Size((self.n_users, self.n_items)))
        return edges, values

    def forward(self, adj):
        h = self.item_id_embedding.weight
        for i in range(self.n_layers):
            h = torch.sparse.mm(self.mm_adj, h)

        ego_embeddings = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0)
        all_embeddings = [ego_embeddings]
        for i in range(self.n_ui_layers):
            side_embeddings = torch.sparse.mm(adj, ego_embeddings)
            ego_embeddings = side_embeddings
            all_embeddings += [ego_embeddings]
        all_embeddings = torch.stack(all_embeddings, dim=1)
        all_embeddings = all_embeddings.mean(dim=1, keepdim=False)
        u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0)
        return u_g_embeddings, i_g_embeddings + h

    def bpr_loss(self, users, pos_items, neg_items):
        pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)
        neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)
        maxi = F.logsigmoid(pos_scores - neg_scores)
        mf_loss = -torch.mean(maxi)
        return mf_loss

    def calculate_loss(self, interaction):
        users = interaction[0]
        pos_items = interaction[1]
        neg_items = interaction[2]

        ua_embeddings, ia_embeddings = self.forward(self.masked_adj)
        self.build_item_graph = False

        u_g_embeddings = ua_embeddings[users]
        pos_i_g_embeddings = ia_embeddings[pos_items]
        neg_i_g_embeddings = ia_embeddings[neg_items]

        batch_mf_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings)

        mf_v_loss, mf_t_loss = 0.0, 0.0
        if self.t_feat is not None:
            text_feats = self.text_trs(self.text_embedding.weight)
            mf_t_loss = self.bpr_loss(ua_embeddings[users], text_feats[pos_items], text_feats[neg_items])
        if self.v_feat is not None:
            image_feats = self.image_trs(self.image_embedding.weight)
            mf_v_loss = self.bpr_loss(ua_embeddings[users], image_feats[pos_items], image_feats[neg_items])
        return batch_mf_loss + self.reg_weight * (mf_t_loss + mf_v_loss)

    def full_sort_predict(self, interaction):
        user = interaction[0]
        restore_user_e, restore_item_e = self.forward(self.norm_adj)
        u_embeddings = restore_user_e[user]
        # dot with all item embedding to accelerate
        scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1))
        return scores
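pre_epoch_processing above implements degree-sensitive edge dropout: edge_values holds the normalized weight of each user-item edge, and torch.multinomial samples the edges to keep with probability proportional to those weights, so edges touching high-degree nodes (which carry small normalized values) are pruned more often. A compact sketch with made-up numbers:

import torch

edge_values = torch.tensor([0.9, 0.1, 0.4, 0.6])      # normalized edge weights
dropout = 0.5
keep_len = int(edge_values.size(0) * (1. - dropout))
keep_idx = torch.multinomial(edge_values, keep_len)   # low-weight edges are dropped more often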

================================================
FILE: src/models/grcn.py
================================================
# coding: utf-8
#
"""
Graph-Refined Convolutional Network for Multimedia Recommendation with Implicit Feedback, MM 2020
"""
import math
import time
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
# from SAGEConv import SAGEConv
# from GATConv import GATConv
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import add_self_loops, dropout_adj
from torch_geometric.utils import remove_self_loops, add_self_loops, softmax

from common.abstract_recommender import GeneralRecommender
from common.loss import BPRLoss, EmbLoss
from common.init import xavier_uniform_initialization
# from torch.utils.checkpoint import checkpoint

##########################################################################


class SAGEConv(MessagePassing):
    def __init__(self, in_channels, out_channels, normalize=True, bias=True, aggr='mean', **kwargs):
        super(SAGEConv, self).__init__(aggr=aggr, **kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels

    def forward(self, x, edge_index, weight_vector, size=None):
        self.weight_vector = weight_vector
        return self.propagate(edge_index, size=size, x=x)

    def message(self, x_j):
        return x_j * self.weight_vector

    def update(self, aggr_out):
        return aggr_out

    def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels, self.out_channels)


class GATConv(MessagePassing):
    def __init__(self, in_channels, out_channels, self_loops=False):
        super(GATConv, self).__init__(aggr='add')  # , **kwargs)
        self.self_loops = self_loops
        self.in_channels = in_channels
        self.out_channels = out_channels

    def forward(self, x, edge_index, size=None):
        edge_index, _ = remove_self_loops(edge_index)
        if self.self_loops:
            edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
        return self.propagate(edge_index, size=size, x=x)

    def message(self, x_i, x_j, size_i, edge_index_i):
        # print(edge_index_i, x_i, x_j)
        self.alpha = torch.mul(x_i, x_j).sum(dim=-1)
        # print(self.alpha)
        # print(edge_index_i, size_i)
        # alpha = F.tanh(alpha)
        # self.alpha = F.leaky_relu(self.alpha)
        # alpha = torch.sigmoid(alpha)
        self.alpha = softmax(self.alpha, edge_index_i, num_nodes=size_i)
        # Sample attention coefficients stochastically.
        # alpha = F.dropout(alpha, p=self.dropout, training=self.training)
        return x_j * self.alpha.view(-1, 1)
        # return x_j * alpha.view(-1, self.heads, 1)

    def update(self, aggr_out):
        return aggr_out


class EGCN(torch.nn.Module):
    def __init__(self, num_user, num_item, dim_E, aggr_mode, has_act, has_norm):
        super(EGCN, self).__init__()
        self.num_user = num_user
        self.num_item = num_item
        self.dim_E = dim_E
        self.aggr_mode = aggr_mode
        self.has_act = has_act
        self.has_norm = has_norm
        self.id_embedding = nn.Parameter(nn.init.xavier_normal_(torch.rand((num_user + num_item, dim_E))))
        self.conv_embed_1 = SAGEConv(dim_E, dim_E, aggr=aggr_mode)
        self.conv_embed_2 = SAGEConv(dim_E, dim_E, aggr=aggr_mode)

    def forward(self, edge_index, weight_vector):
        x = self.id_embedding
        edge_index = torch.cat((edge_index, edge_index[[1, 0]]), dim=1)
        if self.has_norm:
            x = F.normalize(x)
        x_hat_1 = self.conv_embed_1(x, edge_index, weight_vector)
        if self.has_act:
            x_hat_1 = F.leaky_relu_(x_hat_1)
        x_hat_2 = self.conv_embed_2(x_hat_1, edge_index, weight_vector)
        if self.has_act:
            x_hat_2 = F.leaky_relu_(x_hat_2)
        return x + x_hat_1 + x_hat_2


class CGCN(torch.nn.Module):
    def __init__(self, features, num_user, num_item, dim_C, aggr_mode, num_routing, has_act, has_norm, is_word=False):
        super(CGCN, self).__init__()
        self.num_user = num_user
        self.num_item = num_item
        self.aggr_mode = aggr_mode
        self.num_routing = num_routing
        self.has_act = has_act
        self.has_norm = has_norm
        self.dim_C = dim_C
        self.preference = nn.Parameter(nn.init.xavier_normal_(torch.rand((num_user, dim_C))))
        self.conv_embed_1 = GATConv(self.dim_C, self.dim_C)
        self.is_word = is_word
        if is_word:
            self.word_tensor = torch.LongTensor(features).cuda()
            self.features = nn.Embedding(torch.max(features[1]) + 1, dim_C)
            nn.init.xavier_normal_(self.features.weight)
        else:
            self.dim_feat = features.size(1)
            self.features = features
            self.MLP = nn.Linear(self.dim_feat, self.dim_C)
            # print('MLP weight', self.MLP.weight)
            nn.init.xavier_normal_(self.MLP.weight)
            # print(self.MLP.weight)

    def forward(self, edge_index):
        # print(self.features)
        features = F.leaky_relu(self.MLP(self.features))
        # print('features', features)
        if self.has_norm:
            preference = F.normalize(self.preference)
            features = F.normalize(features)
        # print(preference, features)
        for i in range(self.num_routing):
            x = torch.cat((preference, features), dim=0)
            # print(x, edge_index)
            x_hat_1 = self.conv_embed_1(x, edge_index)
            preference = preference + x_hat_1[:self.num_user]
            if self.has_norm:
                preference = F.normalize(preference)
        x = torch.cat((preference, features), dim=0)
        edge_index = torch.cat((edge_index, edge_index[[1, 0]]), dim=1)
        x_hat_1 = self.conv_embed_1(x, edge_index)
        if self.has_act:
            x_hat_1 = F.leaky_relu_(x_hat_1)
        return x + x_hat_1, self.conv_embed_1.alpha.view(-1, 1)


class GRCN(GeneralRecommender):
    def __init__(self, config, dataset):
        super(GRCN, self).__init__(config, dataset)
        self.num_user = self.n_users
        self.num_item = self.n_items
        num_user = self.n_users
        num_item = self.n_items
        dim_x = config['embedding_size']
        dim_C = config['latent_embedding']
        num_layer = config['n_layers']
        batch_size = config['train_batch_size']  # not used
        self.aggr_mode = 'add'
        self.weight_mode = 'confid'
        self.fusion_mode = 'concat'
        has_id = True
        has_act = False
        has_norm = True
        is_word = False
        self.weight = torch.tensor([[1.0], [-1.0]]).to(self.device)
        self.reg_weight = config['reg_weight']
        self.dropout = 0

        # packing interaction in training into edge_index
        train_interactions = dataset.inter_matrix(form='coo').astype(np.float32)
        edge_index = torch.tensor(self.pack_edge_index(train_interactions), dtype=torch.long)
        self.edge_index = edge_index.t().contiguous().to(self.device)
        # self.edge_index = torch.cat((self.edge_index, self.edge_index[[1, 0]]), dim=1)
        self.num_modal = 0

        self.id_gcn = EGCN(num_user, num_item, dim_x, self.aggr_mode, has_act, has_norm)
        self.pruning = True

        num_model = 0
        if self.v_feat is not None:
            self.v_gcn = CGCN(self.v_feat, num_user, num_item, dim_C, self.aggr_mode, num_layer, has_act, has_norm)
            num_model += 1
        # if a_feat is not None:
        #     self.a_gcn = CGCN(self.a_feat, num_user, num_item, dim_C, aggr_mode, num_layer, has_act, has_norm)
        #     num_model += 1
        if self.t_feat is not None:
            self.t_gcn = CGCN(self.t_feat, num_user, num_item, dim_C, self.aggr_mode, num_layer, has_act, has_norm,
                              is_word)
            num_model += 1
        self.model_specific_conf = nn.Parameter(nn.init.xavier_normal_(torch.rand((num_user + num_item, num_model))))
        self.result = nn.init.xavier_normal_(torch.rand((num_user + num_item, dim_x))).to(self.device)

    def pack_edge_index(self, inter_mat):
        rows = inter_mat.row
        cols = inter_mat.col + self.n_users
        # ndarray([598918, 2]) for ml-imdb
        return np.column_stack((rows, cols))

    def forward(self):
        weight = None
        content_rep = None
        num_modal = 0
        edge_index, _ = dropout_adj(self.edge_index, p=self.dropout)
        # print('edge_index: ', edge_index)
        if self.v_feat is not None:
            num_modal += 1
            v_rep, weight_v = self.v_gcn(edge_index)
            weight = weight_v
            content_rep = v_rep
            # print('weight_v is: ', weight)
            # print('content_rep: ', content_rep)
        # if self.a_feat is not None:
        #     num_modal += 1
        #     a_rep, weight_a = self.a_gcn(edge_index)
        #     if weight is None:
        #         weight = weight_a
        #         content_rep = a_rep
        #     else:
        #         content_rep = torch.cat((content_rep, a_rep), dim=1)
        #         if self.weight_mode == 'mean':
        #             weight = weight + weight_a
        #         else:
        #             weight = torch.cat((weight, weight_a), dim=1)
        if self.t_feat is not None:
            num_modal += 1
            t_rep, weight_t = self.t_gcn(edge_index)
            if weight is None:
                weight = weight_t
                content_rep = t_rep
            else:
                content_rep = torch.cat((content_rep, t_rep), dim=1)
                if self.weight_mode == 'mean':
                    weight = weight + weight_t
                else:
                    weight = torch.cat((weight, weight_t), dim=1)

        if self.weight_mode == 'mean':
            weight = weight / num_modal
        elif self.weight_mode == 'max':
            weight, _ = torch.max(weight, dim=1)
            weight = weight.view(-1, 1)
        elif self.weight_mode == 'confid':
            confidence = torch.cat((self.model_specific_conf[edge_index[0]],
                                    self.model_specific_conf[edge_index[1]]), dim=0)
            weight = weight * confidence
            weight, _ = torch.max(weight, dim=1)
            weight = weight.view(-1, 1)
        # print('weight is: ', weight)
        if self.pruning:
            weight = torch.relu(weight)

        id_rep = self.id_gcn(edge_index, weight)
        # print('id_rep is: ', id_rep)
        if self.fusion_mode == 'concat':
            representation = torch.cat((id_rep, content_rep), dim=1)
        elif self.fusion_mode == 'id':
            representation = id_rep
        elif self.fusion_mode == 'mean':
            representation = (id_rep + v_rep + t_rep) / 3  # audio branch is disabled above
        self.result = representation
        # print('representation is: ', representation)
        return representation

    def calculate_loss(self, interaction):
        batch_users = interaction[0]
        pos_items = interaction[1] + self.n_users
        neg_items = interaction[2] + self.n_users
        user_tensor = batch_users.repeat_interleave(2)
        stacked_items = torch.stack((pos_items, neg_items))
        item_tensor = stacked_items.t().contiguous().view(-1)

        out = self.forward()
        user_score = out[user_tensor]
        item_score = out[item_tensor]
        score = torch.sum(user_score * item_score, dim=1).view(-1, 2)
        loss = -torch.mean(torch.log(torch.sigmoid(torch.matmul(score, self.weight))))
        reg_embedding_loss = (self.id_gcn.id_embedding[user_tensor] ** 2
                              + self.id_gcn.id_embedding[item_tensor] ** 2).mean()
        if self.v_feat is not None:
            reg_embedding_loss += (self.v_gcn.preference ** 2).mean()
        reg_loss = self.reg_weight * reg_embedding_loss

        reg_content_loss = torch.zeros(1).cuda()
        if self.v_feat is not None:
            reg_content_loss = reg_content_loss + (self.v_gcn.preference[user_tensor] ** 2).mean()
        # if self.a_feat is not None:
        #     reg_content_loss = reg_content_loss + (self.a_gcn.preference[user_tensor] ** 2).mean()
        if self.t_feat is not None:
            reg_content_loss = reg_content_loss + (self.t_gcn.preference[user_tensor] ** 2).mean()
        reg_confid_loss = (self.model_specific_conf ** 2).mean()

        reg_loss = reg_embedding_loss + reg_content_loss
        reg_loss = self.reg_weight * reg_loss
        # print('loss', loss + reg_loss)
        return loss + reg_loss

    def full_sort_predict(self, interaction):
        user_tensor = self.result[:self.n_users]
        item_tensor = self.result[self.n_users:]
        temp_user_tensor = user_tensor[interaction[0], :]
        score_matrix = torch.matmul(temp_user_tensor, item_tensor.t())
        return score_matrix
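In calculate_loss above, the scores form a (batch, 2) matrix of [positive, negative] pairs, and the fixed weight [[1.0], [-1.0]] turns the matmul into pos - neg before the sigmoid. A tiny self-contained check of that trick with toy scores:

import torch

score = torch.tensor([[2.0, 1.0], [0.5, 1.5]])    # columns: positive, negative
w = torch.tensor([[1.0], [-1.0]])
diff = score @ w                                   # equals pos - neg, shape (batch, 1)
loss = -torch.log(torch.sigmoid(diff)).mean()      # BPR-style pairwise loss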

================================================
FILE: src/models/itemknncbf.py
================================================
# coding: utf-8
# @email: enoche.chow@gmail.com
r"""
ItemKNNCBF
################################################
Reference: https://github.com/CRIPAC-DIG/LATTICE
Are We Really Making Much Progress? A Worrying Analysis of Recent Neural Recommendation Approaches, ACM RecSys'19
"""
import os
import random
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import torch.nn.functional as F

from common.abstract_recommender import GeneralRecommender
from common.loss import BPRLoss, EmbLoss, L2Loss
from utils.utils import build_sim, compute_normalized_laplacian, build_knn_neighbourhood


class ItemKNNCBF(GeneralRecommender):
    def __init__(self, config, dataset):
        super(ItemKNNCBF, self).__init__(config, dataset)

        self.knn_k = config['knn_k']
        self.shrink = config['shrink']

        # load dataset info
        interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32)
        values = interaction_matrix.data
        indices = np.vstack((interaction_matrix.row, interaction_matrix.col))
        i = torch.LongTensor(indices)
        v = torch.FloatTensor(values)
        shape = interaction_matrix.shape
        r_matrix = torch.sparse.FloatTensor(i, v, torch.Size(shape)).to(self.device)

        if self.v_feat is not None and self.t_feat is not None:
            item_fea = torch.cat((self.v_feat, self.t_feat), -1)
        elif self.v_feat is not None:
            item_fea = self.v_feat
        else:
            item_fea = self.t_feat

        self.dummy_embeddings = nn.Parameter(torch.Tensor([0.5, 0.5]))

        # build item-item sim matrix
        item_sim = self.build_item_sim_matrix(item_fea)
        self.scores_matrix = torch.mm(r_matrix, item_sim)

    def build_item_sim_matrix(self, features):
        i_norm = torch.norm(features, p=2, dim=-1, keepdim=True)
        ij_norm = i_norm * i_norm.T + self.shrink
        ij = torch.mm(features, features.T)
        sim = ij.div(ij_norm)
        # top-k
        knn_val, knn_ind = torch.topk(sim, self.knn_k, dim=-1)
        weighted_adjacency_matrix = (torch.zeros_like(sim)).scatter_(-1, knn_ind, knn_val)
        return weighted_adjacency_matrix

    def build_item_sim_matrix_with_blocks(self, features, block_size=1000):
        """
        Compute the item similarity matrix block by block, with a progress bar.

        :param features: Tensor, item feature vectors of shape (num_items, feature_dim)
        :param block_size: int, block size, defaults to 1000
        :return: Tensor, weighted adjacency matrix
        """
        from tqdm import tqdm
        num_items = features.size(0)
        i_norm = torch.norm(features, p=2, dim=-1, keepdim=True)
        shrink = self.shrink

        # initialize the similarity matrix
        weighted_adjacency_matrix = torch.zeros(num_items, num_items, device=features.device)

        # block-wise computation
        for start_idx in tqdm(range(0, num_items, block_size), desc="Computing item similarities"):
            end_idx = min(start_idx + block_size, num_items)

            # current block
            block_features = features[start_idx:end_idx]
            block_norm = i_norm[start_idx:end_idx]

            # similarity between the block and all items
            ij = torch.mm(block_features, features.T)
            ij_norm = block_norm * i_norm.T + shrink
            sim = ij.div(ij_norm)

            # top-k
            knn_val, knn_ind = torch.topk(sim, self.knn_k, dim=-1)
            weighted_adjacency_matrix[start_idx:end_idx] = (torch.zeros_like(sim)
                                                            .scatter_(-1, knn_ind, knn_val))
        return weighted_adjacency_matrix

    def calculate_loss(self, interaction):
        tmp_v = torch.tensor(0.0)
        return tmp_v

    def full_sort_predict(self, interaction):
        user = interaction[0]
        scores = self.scores_matrix[user]
        return scores
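build_item_sim_matrix computes a shrunk cosine similarity: self.shrink is added to the product of the norms so that pairs with weak feature support are damped, and only the top-k entries per row are kept. A standalone sketch with random toy features (not part of the repository):

import torch

feats = torch.randn(6, 4)
shrink = 10.0
norms = feats.norm(p=2, dim=-1, keepdim=True)
sim = (feats @ feats.t()) / (norms * norms.t() + shrink)   # shrink damps low-norm pairs
topv, topi = torch.topk(sim, 3, dim=-1)
knn_sim = torch.zeros_like(sim).scatter_(-1, topi, topv)   # keep only top-3 per row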

================================================
FILE: src/models/lattice.py
================================================
# coding: utf-8
# @email: enoche.chow@gmail.com
r"""
LATTICE
################################################
Reference: https://github.com/CRIPAC-DIG/LATTICE
ACM MM'2021: [Mining Latent Structures for Multimedia Recommendation]
https://arxiv.org/abs/2104.09036
"""
import os
import random
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import torch.nn.functional as F

from common.abstract_recommender import GeneralRecommender
from common.loss import BPRLoss, EmbLoss, L2Loss
from utils.utils import build_sim, compute_normalized_laplacian, build_knn_neighbourhood


class LATTICE(GeneralRecommender):
    def __init__(self, config, dataset):
        super(LATTICE, self).__init__(config, dataset)

        self.embedding_dim = config['embedding_size']
        self.feat_embed_dim = config['feat_embed_dim']
        self.weight_size = config['weight_size']
        self.knn_k = config['knn_k']
        self.lambda_coeff = config['lambda_coeff']
        self.cf_model = config['cf_model']
        self.n_layers = config['n_layers']
        self.reg_weight = config['reg_weight']
        self.build_item_graph = True

        # load dataset info
        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32)

        self.norm_adj = self.get_adj_mat()
        self.norm_adj = self.sparse_mx_to_torch_sparse_tensor(self.norm_adj).float().to(self.device)
        self.item_adj = None

        self.n_ui_layers = len(self.weight_size)
        self.weight_size = [self.embedding_dim] + self.weight_size

        self.user_embedding = nn.Embedding(self.n_users, self.embedding_dim)
        self.item_id_embedding = nn.Embedding(self.n_items, self.embedding_dim)
        nn.init.xavier_uniform_(self.user_embedding.weight)
        nn.init.xavier_uniform_(self.item_id_embedding.weight)

        if config['cf_model'] == 'ngcf':
            self.GC_Linear_list = nn.ModuleList()
            self.Bi_Linear_list = nn.ModuleList()
            self.dropout_list = nn.ModuleList()
            dropout_list = config['mess_dropout']
            for i in range(self.n_ui_layers):
                self.GC_Linear_list.append(nn.Linear(self.weight_size[i], self.weight_size[i + 1]))
                self.Bi_Linear_list.append(nn.Linear(self.weight_size[i], self.weight_size[i + 1]))
                self.dropout_list.append(nn.Dropout(dropout_list[i]))

        dataset_path = os.path.abspath(config['data_path'] + config['dataset'])
        image_adj_file = os.path.join(dataset_path, 'image_adj_{}.pt'.format(self.knn_k))
        text_adj_file = os.path.join(dataset_path, 'text_adj_{}.pt'.format(self.knn_k))

        if self.v_feat is not None:
            self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=False)
            if os.path.exists(image_adj_file):
                image_adj = torch.load(image_adj_file)
            else:
                image_adj = build_sim(self.image_embedding.weight.detach())
                image_adj = build_knn_neighbourhood(image_adj, topk=self.knn_k)
                image_adj = compute_normalized_laplacian(image_adj)
                torch.save(image_adj, image_adj_file)
            self.image_original_adj = image_adj.cuda()

        if self.t_feat is not None:
            self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=False)
            if os.path.exists(text_adj_file):
                text_adj = torch.load(text_adj_file)
            else:
                text_adj = build_sim(self.text_embedding.weight.detach())
                text_adj = build_knn_neighbourhood(text_adj, topk=self.knn_k)
                text_adj = compute_normalized_laplacian(text_adj)
                torch.save(text_adj, text_adj_file)
            self.text_original_adj = text_adj.cuda()

        if self.v_feat is not None:
            self.image_trs = nn.Linear(self.v_feat.shape[1], self.feat_embed_dim)
        if self.t_feat is not None:
            self.text_trs = nn.Linear(self.t_feat.shape[1], self.feat_embed_dim)

        self.modal_weight = nn.Parameter(torch.Tensor([0.5, 0.5]))
        self.softmax = nn.Softmax(dim=0)

    def pre_epoch_processing(self):
        self.build_item_graph = True

    def get_adj_mat(self):
        adj_mat = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32)
        adj_mat = adj_mat.tolil()
        R = self.interaction_matrix.tolil()

        adj_mat[:self.n_users, self.n_users:] = R
        adj_mat[self.n_users:, :self.n_users] = R.T
        adj_mat = adj_mat.todok()

        def normalized_adj_single(adj):
            rowsum = np.array(adj.sum(1))
            d_inv = np.power(rowsum, -1).flatten()
            d_inv[np.isinf(d_inv)] = 0.
            d_mat_inv = sp.diags(d_inv)
            norm_adj = d_mat_inv.dot(adj)
            # norm_adj = adj.dot(d_mat_inv)
            # print('generate single-normalized adjacency matrix.')
            return norm_adj.tocoo()

        norm_adj_mat = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0]))
        return norm_adj_mat.tocsr()

    def sparse_mx_to_torch_sparse_tensor(self, sparse_mx):
        """Convert a scipy sparse matrix to a torch sparse tensor."""
        sparse_mx = sparse_mx.tocoo().astype(np.float32)
        indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
        values = torch.from_numpy(sparse_mx.data)
        shape = torch.Size(sparse_mx.shape)
        return torch.sparse.FloatTensor(indices, values, shape)

    def forward(self, adj, build_item_graph=False):
        if self.v_feat is not None:
            image_feats = self.image_trs(self.image_embedding.weight)
        if self.t_feat is not None:
            text_feats = self.text_trs(self.text_embedding.weight)

        if build_item_graph:
            weight = self.softmax(self.modal_weight)
            if self.v_feat is not None:
                self.image_adj = build_sim(image_feats)
                self.image_adj = build_knn_neighbourhood(self.image_adj, topk=self.knn_k)
                learned_adj = self.image_adj
                original_adj = self.image_original_adj
            if self.t_feat is not None:
                self.text_adj = build_sim(text_feats)
                self.text_adj = build_knn_neighbourhood(self.text_adj, topk=self.knn_k)
                learned_adj = self.text_adj
                original_adj = self.text_original_adj
            if self.v_feat is not None and self.t_feat is not None:
                learned_adj = weight[0] * self.image_adj + weight[1] * self.text_adj
                original_adj = weight[0] * self.image_original_adj + weight[1] * self.text_original_adj
            learned_adj = compute_normalized_laplacian(learned_adj)
            if self.item_adj is not None:
                del self.item_adj
            self.item_adj = (1 - self.lambda_coeff) * learned_adj + self.lambda_coeff * original_adj
        else:
            self.item_adj = self.item_adj.detach()

        h = self.item_id_embedding.weight
        for i in range(self.n_layers):
            h = torch.mm(self.item_adj, h)

        if self.cf_model == 'ngcf':
            ego_embeddings = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0)
            all_embeddings = [ego_embeddings]
            for i in range(self.n_ui_layers):
                side_embeddings = torch.sparse.mm(adj, ego_embeddings)
                sum_embeddings = F.leaky_relu(self.GC_Linear_list[i](side_embeddings))
                bi_embeddings = torch.mul(ego_embeddings, side_embeddings)
                bi_embeddings = F.leaky_relu(self.Bi_Linear_list[i](bi_embeddings))
                ego_embeddings = sum_embeddings + bi_embeddings
                ego_embeddings = self.dropout_list[i](ego_embeddings)
                norm_embeddings = F.normalize(ego_embeddings, p=2, dim=1)
                all_embeddings += [norm_embeddings]
            all_embeddings = torch.stack(all_embeddings, dim=1)
            all_embeddings = all_embeddings.mean(dim=1, keepdim=False)
            u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0)
            i_g_embeddings = i_g_embeddings + F.normalize(h, p=2, dim=1)
            return u_g_embeddings, i_g_embeddings
        elif self.cf_model == 'lightgcn':
            ego_embeddings = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0)
            all_embeddings = [ego_embeddings]
            for i in range(self.n_ui_layers):
                side_embeddings = torch.sparse.mm(adj, ego_embeddings)
                ego_embeddings = side_embeddings
                all_embeddings += [ego_embeddings]
            all_embeddings = torch.stack(all_embeddings, dim=1)
            all_embeddings = all_embeddings.mean(dim=1, keepdim=False)
            u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0)
            i_g_embeddings = i_g_embeddings + F.normalize(h, p=2, dim=1)
            return u_g_embeddings, i_g_embeddings
        elif self.cf_model == 'mf':
            return self.user_embedding.weight, self.item_id_embedding.weight + F.normalize(h, p=2, dim=1)

    def bpr_loss(self, users, pos_items, neg_items):
        pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)
        neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)

        regularizer = 1. / 2 * (users ** 2).sum() + 1. / 2 * (pos_items ** 2).sum() + 1. / 2 * (neg_items ** 2).sum()
        regularizer = regularizer / self.batch_size

        maxi = F.logsigmoid(pos_scores - neg_scores)
        mf_loss = -torch.mean(maxi)

        emb_loss = self.reg_weight * regularizer
        reg_loss = 0.0
        return mf_loss, emb_loss, reg_loss

    def calculate_loss(self, interaction):
        users = interaction[0]
        pos_items = interaction[1]
        neg_items = interaction[2]

        ua_embeddings, ia_embeddings = self.forward(self.norm_adj, build_item_graph=self.build_item_graph)
        self.build_item_graph = False

        u_g_embeddings = ua_embeddings[users]
        pos_i_g_embeddings = ia_embeddings[pos_items]
        neg_i_g_embeddings = ia_embeddings[neg_items]

        batch_mf_loss, batch_emb_loss, batch_reg_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings,
                                                                      neg_i_g_embeddings)
        return batch_mf_loss + batch_emb_loss + batch_reg_loss

    def full_sort_predict(self, interaction):
        user = interaction[0]
        restore_user_e, restore_item_e = self.forward(self.norm_adj, build_item_graph=True)
        u_embeddings = restore_user_e[user]
        # dot with all item embedding to accelerate
        scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1))
        return scores
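The graph-structure learning step in forward reduces to a convex combination that anchors the freshly learned item graph to the frozen one built from raw features. Its essentials with toy dense matrices (illustrative only):

import torch

lambda_coeff = 0.7
learned_adj = torch.rand(5, 5)      # re-built each epoch from projected features
original_adj = torch.rand(5, 5)     # frozen, built once from raw features
item_adj = (1 - lambda_coeff) * learned_adj + lambda_coeff * original_adj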

================================================
FILE: src/models/layergcn.py
================================================
# -*- coding: utf-8 -*-

import numpy as np
import scipy.sparse as sp
import math
import random
import torch
import torch.nn as nn
import torch.nn.functional as F

from common.abstract_recommender import GeneralRecommender
from common.loss import BPRLoss, EmbLoss, L2Loss


class LayerGCN(GeneralRecommender):
    def __init__(self, config, dataset):
        super(LayerGCN, self).__init__(config, dataset)

        # load dataset info
        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32)

        # load parameters info
        self.latent_dim = config['embedding_size']  # int type: the embedding size of lightGCN
        self.n_layers = config['n_layers']  # int type: the layer num of lightGCN
        self.reg_weight = config['reg_weight']  # float32 type: the weight decay for l2 normalization
        self.dropout = config['dropout']

        self.n_nodes = self.n_users + self.n_items

        # define layers and loss
        self.user_embeddings = nn.Parameter(nn.init.xavier_uniform_(torch.empty(self.n_users, self.latent_dim)))
        self.item_embeddings = nn.Parameter(nn.init.xavier_uniform_(torch.empty(self.n_items, self.latent_dim)))

        # normalized adj matrix
        self.norm_adj_matrix = self.get_norm_adj_mat().to(self.device)
        self.masked_adj = None
        self.forward_adj = None
        self.pruning_random = False

        # edge prune
        self.edge_indices, self.edge_values = self.get_edge_info()

        self.mf_loss = BPRLoss()
        self.reg_loss = L2Loss()

    # def post_epoch_processing(self):
    #     with torch.no_grad():
    #         return '=== Layer weights: {}'.format(F.softmax(self.layer_weights.exp(), dim=0))

    def pre_epoch_processing(self):
        if self.dropout <= .0:
            self.masked_adj = self.norm_adj_matrix
            return
        keep_len = int(self.edge_values.size(0) * (1. - self.dropout))
        if self.pruning_random:
            # pruning randomly
            keep_idx = torch.tensor(random.sample(range(self.edge_values.size(0)), keep_len))
        else:
            # pruning edges by probability, which prunes high-degree nodes more often
            keep_idx = torch.multinomial(self.edge_values, keep_len)
        self.pruning_random = True ^ self.pruning_random  # alternate between the two pruning schemes
        keep_indices = self.edge_indices[:, keep_idx]
        # norm values
        keep_values = self._normalize_adj_m(keep_indices, torch.Size((self.n_users, self.n_items)))
        all_values = torch.cat((keep_values, keep_values))
        # update keep_indices to users/items+self.n_users
        keep_indices[1] += self.n_users
        all_indices = torch.cat((keep_indices, torch.flip(keep_indices, [0])), 1)
        self.masked_adj = torch.sparse.FloatTensor(all_indices, all_values, self.norm_adj_matrix.shape).to(self.device)

    def _normalize_adj_m(self, indices, adj_size):
        adj = torch.sparse.FloatTensor(indices, torch.ones_like(indices[0]), adj_size)
        row_sum = 1e-7 + torch.sparse.sum(adj, -1).to_dense()
        col_sum = 1e-7 + torch.sparse.sum(adj.t(), -1).to_dense()
        r_inv_sqrt = torch.pow(row_sum, -0.5)
        rows_inv_sqrt = r_inv_sqrt[indices[0]]
        c_inv_sqrt = torch.pow(col_sum, -0.5)
        cols_inv_sqrt = c_inv_sqrt[indices[1]]
        values = rows_inv_sqrt * cols_inv_sqrt
        return values

    def get_edge_info(self):
        rows = torch.from_numpy(self.interaction_matrix.row)
        cols = torch.from_numpy(self.interaction_matrix.col)
        edges = torch.stack([rows, cols]).type(torch.LongTensor)
        # edge normalized values
        values = self._normalize_adj_m(edges, torch.Size((self.n_users, self.n_items)))
        return edges, values

    def get_norm_adj_mat(self):
        A = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32)
        inter_M = self.interaction_matrix
        inter_M_t = self.interaction_matrix.transpose()
        data_dict = dict(zip(zip(inter_M.row, inter_M.col + self.n_users), [1] * inter_M.nnz))
        data_dict.update(dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col), [1] * inter_M_t.nnz)))
        A._update(data_dict)
        # norm adj matrix
        sumArr = (A > 0).sum(axis=1)
        # add epsilon to avoid a divide-by-zero warning
        diag = np.array(sumArr.flatten())[0] + 1e-7
        diag = np.power(diag, -0.5)
        D = sp.diags(diag)
        L = D * A * D
        # convert norm_adj matrix to tensor
        L = sp.coo_matrix(L)
        row = L.row
        col = L.col
        i = torch.LongTensor(np.array([row, col]))
        data = torch.FloatTensor(L.data)
        return torch.sparse.FloatTensor(i, data, torch.Size((self.n_nodes, self.n_nodes)))

    def get_ego_embeddings(self):
        r"""Get the embedding of users and items and combine to an embedding matrix.

        Returns:
            Tensor of the embedding matrix. Shape of [n_items+n_users, embedding_dim]
        """
        ego_embeddings = torch.cat([self.user_embeddings, self.item_embeddings], 0)
        return ego_embeddings

    def forward(self):
        ego_embeddings = self.get_ego_embeddings()
        all_embeddings = ego_embeddings
        embeddings_layers = []

        for layer_idx in range(self.n_layers):
            all_embeddings = torch.sparse.mm(self.forward_adj, all_embeddings)
            _weights = F.cosine_similarity(all_embeddings, ego_embeddings, dim=-1)
            all_embeddings = torch.einsum('a,ab->ab', _weights, all_embeddings)
            embeddings_layers.append(all_embeddings)

        ui_all_embeddings = torch.sum(torch.stack(embeddings_layers, dim=0), dim=0)
        user_all_embeddings, item_all_embeddings = torch.split(ui_all_embeddings, [self.n_users, self.n_items])
        return user_all_embeddings, item_all_embeddings

    def bpr_loss(self, u_embeddings, i_embeddings, user, pos_item, neg_item):
        u_embeddings = u_embeddings[user]
        posi_embeddings = i_embeddings[pos_item]
        negi_embeddings = i_embeddings[neg_item]

        # calculate BPR Loss
        pos_scores = torch.mul(u_embeddings, posi_embeddings).sum(dim=1)
        neg_scores = torch.mul(u_embeddings, negi_embeddings).sum(dim=1)

        m = torch.nn.LogSigmoid()
        bpr_loss = torch.sum(-m(pos_scores - neg_scores))
        # mf_loss = self.mf_loss(pos_scores, neg_scores)
        return bpr_loss

    def emb_loss(self, user, pos_item, neg_item):
        # calculate the L2 regularization on the ego embeddings
        u_ego_embeddings = self.user_embeddings[user]
        posi_ego_embeddings = self.item_embeddings[pos_item]
        negi_ego_embeddings = self.item_embeddings[neg_item]
        reg_loss = self.reg_loss(u_ego_embeddings, posi_ego_embeddings, negi_ego_embeddings)
        return reg_loss

    def calculate_loss(self, interaction):
        user = interaction[0]
        pos_item = interaction[1]
        neg_item = interaction[2]

        self.forward_adj = self.masked_adj
        user_all_embeddings, item_all_embeddings = self.forward()

        mf_loss = self.bpr_loss(user_all_embeddings, item_all_embeddings, user, pos_item, neg_item)
        reg_loss = self.emb_loss(user, pos_item, neg_item)
        loss = mf_loss + self.reg_weight * reg_loss
        return loss

    def full_sort_predict(self, interaction):
        user = interaction[0]
        self.forward_adj = self.norm_adj_matrix
        restore_user_e, restore_item_e = self.forward()
        u_embeddings = restore_user_e[user]
        # dot with all item embedding to accelerate
        scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1))
        return scores
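LayerGCN's distinguishing step in forward is layer refinement: after each propagation, every node's embedding is rescaled by its cosine similarity to its layer-0 (ego) embedding, which is what the einsum('a,ab->ab', ...) expresses. A minimal sketch with toy tensors:

import torch
import torch.nn.functional as F

ego = torch.randn(4, 8)                          # layer-0 embeddings
layer = torch.randn(4, 8)                        # embeddings after one propagation
w = F.cosine_similarity(layer, ego, dim=-1)      # one scalar weight per node
refined = torch.einsum('a,ab->ab', w, layer)     # row-wise rescaling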
Shape of [n_items+n_users, embedding_dim] """ ego_embeddings = torch.cat([self.user_embeddings, self.item_embeddings], 0) return ego_embeddings def forward(self): ego_embeddings = self.get_ego_embeddings() all_embeddings = ego_embeddings embeddings_layers = [] for layer_idx in range(self.n_layers): all_embeddings = torch.sparse.mm(self.forward_adj, all_embeddings) _weights = F.cosine_similarity(all_embeddings, ego_embeddings, dim=-1) all_embeddings = torch.einsum('a,ab->ab', _weights, all_embeddings) embeddings_layers.append(all_embeddings) ui_all_embeddings = torch.sum(torch.stack(embeddings_layers, dim=0), dim=0) user_all_embeddings, item_all_embeddings = torch.split(ui_all_embeddings, [self.n_users, self.n_items]) return user_all_embeddings, item_all_embeddings def bpr_loss(self, u_embeddings, i_embeddings, user, pos_item, neg_item): u_embeddings = u_embeddings[user] posi_embeddings = i_embeddings[pos_item] negi_embeddings = i_embeddings[neg_item] # calculate BPR Loss pos_scores = torch.mul(u_embeddings, posi_embeddings).sum(dim=1) neg_scores = torch.mul(u_embeddings, negi_embeddings).sum(dim=1) m = torch.nn.LogSigmoid() bpr_loss = torch.sum(-m(pos_scores - neg_scores)) #mf_loss = self.mf_loss(pos_scores, neg_scores) return bpr_loss def emb_loss(self, user, pos_item, neg_item): # calculate BPR Loss u_ego_embeddings = self.user_embeddings[user] posi_ego_embeddings = self.item_embeddings[pos_item] negi_ego_embeddings = self.item_embeddings[neg_item] reg_loss = self.reg_loss(u_ego_embeddings, posi_ego_embeddings, negi_ego_embeddings) return reg_loss def calculate_loss(self, interaction): user = interaction[0] pos_item = interaction[1] neg_item = interaction[2] self.forward_adj = self.masked_adj user_all_embeddings, item_all_embeddings = self.forward() mf_loss = self.bpr_loss(user_all_embeddings, item_all_embeddings, user, pos_item, neg_item) reg_loss = self.emb_loss(user, pos_item, neg_item) loss = mf_loss + self.reg_weight * reg_loss return loss def full_sort_predict(self, interaction): user = interaction[0] self.forward_adj = self.norm_adj_matrix restore_user_e, restore_item_e = self.forward() u_embeddings = restore_user_e[user] # dot with all item embedding to accelerate scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1)) return scores ================================================ FILE: src/models/lgmrec.py ================================================ # coding: utf-8 # @email: georgeguo.gzq.cn@gmail.com r""" LGMRec ################################################ Reference: https://github.com/georgeguo-cn/LGMRec AAAI'2024: [LGMRec: Local and Global Graph Learning for Multimodal Recommendation] """ import numpy as np import scipy.sparse as sp import torch import torch.nn as nn import torch.nn.functional as F from common.abstract_recommender import GeneralRecommender class LGMRec(GeneralRecommender): def __init__(self, config, dataset): super(LGMRec, self).__init__(config, dataset) self.embedding_dim = config['embedding_size'] self.feat_embed_dim = config['feat_embed_dim'] self.cf_model = config['cf_model'] self.n_mm_layer = config['n_mm_layers'] self.n_ui_layers = config['n_ui_layers'] self.n_hyper_layer = config['n_hyper_layer'] self.hyper_num = config['hyper_num'] self.keep_rate = config['keep_rate'] self.alpha = config['alpha'] self.cl_weight = config['cl_weight'] self.reg_weight = config['reg_weight'] self.tau = 0.2 self.n_nodes = self.n_users + self.n_items self.hgnnLayer = HGNNLayer(self.n_hyper_layer) # load dataset info self.interaction_matrix = 
dataset.inter_matrix(form='coo').astype(np.float32) self.adj = self.scipy_matrix_to_sparse_tenser(self.interaction_matrix, torch.Size((self.n_users, self.n_items))) self.num_inters, self.norm_adj = self.get_norm_adj_mat() self.num_inters = torch.FloatTensor(1.0 / (self.num_inters + 1e-7)).to(self.device) # init user and item ID embeddings self.user_embedding = nn.Embedding(self.n_users, self.embedding_dim) self.item_id_embedding = nn.Embedding(self.n_items, self.embedding_dim) nn.init.xavier_uniform_(self.user_embedding.weight) nn.init.xavier_uniform_(self.item_id_embedding.weight) self.drop = nn.Dropout(p=1-self.keep_rate) # load item modal features and define hyperedges embeddings if self.v_feat is not None: self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=True) self.item_image_trs = nn.Parameter(nn.init.xavier_uniform_(torch.zeros(self.v_feat.shape[1], self.feat_embed_dim))) self.v_hyper = nn.Parameter(nn.init.xavier_uniform_(torch.zeros(self.v_feat.shape[1], self.hyper_num))) if self.t_feat is not None: self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=True) self.item_text_trs = nn.Parameter(nn.init.xavier_uniform_(torch.zeros(self.t_feat.shape[1], self.feat_embed_dim))) self.t_hyper = nn.Parameter(nn.init.xavier_uniform_(torch.zeros(self.t_feat.shape[1], self.hyper_num))) def scipy_matrix_to_sparse_tenser(self, matrix, shape): row = matrix.row col = matrix.col i = torch.LongTensor(np.array([row, col])) data = torch.FloatTensor(matrix.data) return torch.sparse.FloatTensor(i, data, shape).to(self.device) def get_norm_adj_mat(self): A = sp.dok_matrix((self.n_nodes, self.n_nodes), dtype=np.float32) inter_M = self.interaction_matrix inter_M_t = self.interaction_matrix.transpose() data_dict = dict(zip(zip(inter_M.row, inter_M.col + self.n_users), [1] * inter_M.nnz)) data_dict.update(dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col), [1] * inter_M_t.nnz))) A._update(data_dict) # norm adj matrix sumArr = (A > 0).sum(axis=1) # add epsilon to avoid Devide by zero Warning diag = np.array(sumArr.flatten())[0] + 1e-7 diag = np.power(diag, -0.5) D = sp.diags(diag) L = D * A * D # covert norm_adj matrix to tensor L = sp.coo_matrix(L) return sumArr, self.scipy_matrix_to_sparse_tenser(L, torch.Size((self.n_nodes, self.n_nodes))) # collaborative graph embedding def cge(self): if self.cf_model == 'mf': cge_embs = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0) if self.cf_model == 'lightgcn': ego_embeddings = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0) cge_embs = [ego_embeddings] for _ in range(self.n_ui_layers): ego_embeddings = torch.sparse.mm(self.norm_adj, ego_embeddings) cge_embs += [ego_embeddings] cge_embs = torch.stack(cge_embs, dim=1) cge_embs = cge_embs.mean(dim=1, keepdim=False) return cge_embs # modality graph embedding def mge(self, str='v'): if str == 'v': item_feats = torch.mm(self.image_embedding.weight, self.item_image_trs) elif str == 't': item_feats = torch.mm(self.text_embedding.weight, self.item_text_trs) user_feats = torch.sparse.mm(self.adj, item_feats) * self.num_inters[:self.n_users] # user_feats = self.user_embedding.weight mge_feats = torch.concat([user_feats, item_feats], dim=0) for _ in range(self.n_mm_layer): mge_feats = torch.sparse.mm(self.norm_adj, mge_feats) return mge_feats def forward(self): # hyperedge dependencies constructing if self.v_feat is not None: iv_hyper = torch.mm(self.image_embedding.weight, self.v_hyper) uv_hyper = torch.mm(self.adj, 
iv_hyper) iv_hyper = F.gumbel_softmax(iv_hyper, self.tau, dim=1, hard=False) uv_hyper = F.gumbel_softmax(uv_hyper, self.tau, dim=1, hard=False) if self.t_feat is not None: it_hyper = torch.mm(self.text_embedding.weight, self.t_hyper) ut_hyper = torch.mm(self.adj, it_hyper) it_hyper = F.gumbel_softmax(it_hyper, self.tau, dim=1, hard=False) ut_hyper = F.gumbel_softmax(ut_hyper, self.tau, dim=1, hard=False) # CGE: collaborative graph embedding cge_embs = self.cge() if self.v_feat is not None and self.t_feat is not None: # MGE: modal graph embedding v_feats = self.mge('v') t_feats = self.mge('t') # local embeddings = collaborative-related embedding + modality-related embedding mge_embs = F.normalize(v_feats) + F.normalize(t_feats) lge_embs = cge_embs + mge_embs # GHE: global hypergraph embedding uv_hyper_embs, iv_hyper_embs = self.hgnnLayer(self.drop(iv_hyper), self.drop(uv_hyper), cge_embs[self.n_users:]) ut_hyper_embs, it_hyper_embs = self.hgnnLayer(self.drop(it_hyper), self.drop(ut_hyper), cge_embs[self.n_users:]) av_hyper_embs = torch.concat([uv_hyper_embs, iv_hyper_embs], dim=0) at_hyper_embs = torch.concat([ut_hyper_embs, it_hyper_embs], dim=0) ghe_embs = av_hyper_embs + at_hyper_embs # local embeddings + alpha * global embeddings all_embs = lge_embs + self.alpha * F.normalize(ghe_embs) else: all_embs = cge_embs u_embs, i_embs = torch.split(all_embs, [self.n_users, self.n_items], dim=0) return u_embs, i_embs, [uv_hyper_embs, iv_hyper_embs, ut_hyper_embs, it_hyper_embs] def bpr_loss(self, users, pos_items, neg_items): pos_scores = torch.sum(torch.mul(users, pos_items), dim=1) neg_scores = torch.sum(torch.mul(users, neg_items), dim=1) bpr_loss = -torch.mean(F.logsigmoid(pos_scores - neg_scores)) return bpr_loss def ssl_triple_loss(self, emb1, emb2, all_emb): norm_emb1 = F.normalize(emb1) norm_emb2 = F.normalize(emb2) norm_all_emb = F.normalize(all_emb) pos_score = torch.exp(torch.mul(norm_emb1, norm_emb2).sum(dim=1) / self.tau) ttl_score = torch.exp(torch.matmul(norm_emb1, norm_all_emb.T) / self.tau).sum(dim=1) ssl_loss = -torch.log(pos_score / ttl_score).sum() return ssl_loss def reg_loss(self, *embs): reg_loss = 0 for emb in embs: reg_loss += torch.norm(emb, p=2) reg_loss /= embs[-1].shape[0] return reg_loss def calculate_loss(self, interaction): ua_embeddings, ia_embeddings, hyper_embeddings = self.forward() users = interaction[0] pos_items = interaction[1] neg_items = interaction[2] u_g_embeddings = ua_embeddings[users] pos_i_g_embeddings = ia_embeddings[pos_items] neg_i_g_embeddings = ia_embeddings[neg_items] batch_bpr_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings) [uv_embs, iv_embs, ut_embs, it_embs] = hyper_embeddings batch_hcl_loss = self.ssl_triple_loss(uv_embs[users], ut_embs[users], ut_embs) + self.ssl_triple_loss(iv_embs[pos_items], it_embs[pos_items], it_embs) batch_reg_loss = self.reg_loss(u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings) loss = batch_bpr_loss + self.cl_weight * batch_hcl_loss + self.reg_weight * batch_reg_loss return loss def full_sort_predict(self, interaction): user = interaction[0] user_embs, item_embs, _ = self.forward() scores = torch.matmul(user_embs[user], item_embs.T) return scores class HGNNLayer(nn.Module): def __init__(self, n_hyper_layer): super(HGNNLayer, self).__init__() self.h_layer = n_hyper_layer def forward(self, i_hyper, u_hyper, embeds): i_ret = embeds for _ in range(self.h_layer): lat = torch.mm(i_hyper.T, i_ret) i_ret = torch.mm(i_hyper, lat) u_ret = torch.mm(u_hyper, lat) return u_ret, i_ret 
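# ------------------------------------------------------------------------------
# A minimal, self-contained sketch (not part of this repository) of the two ideas
# above: hyperedge memberships are obtained by projecting raw modality features
# onto `hyper_num` learnable hyperedge vectors followed by Gumbel-softmax, and
# HGNNLayer then passes messages item -> hyperedge -> item/user. All shapes and
# tensors below are invented purely for illustration.
import torch
import torch.nn.functional as F

n_users, n_items, feat_dim, emb_dim, hyper_num = 4, 6, 32, 8, 3
feat = torch.randn(n_items, feat_dim)               # stand-in for v_feat / t_feat
hyper_proj = torch.randn(feat_dim, hyper_num)       # stand-in for self.v_hyper / self.t_hyper
adj = (torch.rand(n_users, n_items) > 0.5).float()  # stand-in for the dense form of self.adj

i_hyper = F.gumbel_softmax(feat @ hyper_proj, tau=0.2, dim=1)          # item-hyperedge memberships
u_hyper = F.gumbel_softmax(adj @ (feat @ hyper_proj), tau=0.2, dim=1)  # user-hyperedge memberships

item_embs = torch.randn(n_items, emb_dim)           # stand-in for cge_embs[self.n_users:]
i_ret = item_embs
for _ in range(2):                                  # n_hyper_layer rounds, as in HGNNLayer
    lat = i_hyper.T @ i_ret                         # (hyper_num, emb_dim) hyperedge latents
    i_ret = i_hyper @ lat                           # scatter latents back to items
u_ret = u_hyper @ lat                               # users read the final hyperedge latents
print(u_ret.shape, i_ret.shape)                     # torch.Size([4, 8]) torch.Size([6, 8])
# In the model itself, the memberships additionally pass through self.drop before
# HGNNLayer, and the resulting global embeddings are normalized and scaled by
# alpha before being added to the local embeddings.
# ------------------------------------------------------------------------------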
================================================ FILE: src/models/lightgcn.py ================================================ # -*- coding: utf-8 -*- r""" LightGCN ################################################ Reference: Xiangnan He et al. "LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation." in SIGIR 2020. Reference code: https://github.com/kuandeng/LightGCN """ import numpy as np import scipy.sparse as sp import torch import torch.nn as nn from common.abstract_recommender import GeneralRecommender from common.loss import BPRLoss, EmbLoss from common.init import xavier_uniform_initialization class LightGCN(GeneralRecommender): r"""LightGCN is a GCN-based recommender model. LightGCN includes only the most essential component in GCN — neighborhood aggregation — for collaborative filtering. Specifically, LightGCN learns user and item embeddings by linearly propagating them on the user-item interaction graph, and uses the weighted sum of the embeddings learned at all layers as the final embedding. We implement the model following the original author with a pairwise training mode. """ def __init__(self, config, dataset): super(LightGCN, self).__init__(config, dataset) # load dataset info self.interaction_matrix = dataset.inter_matrix( form='coo').astype(np.float32) # load parameters info self.latent_dim = config['embedding_size'] # int type:the embedding size of lightGCN self.n_layers = config['n_layers'] # int type:the layer num of lightGCN self.reg_weight = config['reg_weight'] # float32 type: the weight decay for l2 normalizaton self.mf_loss = BPRLoss() self.reg_loss = EmbLoss() self.embedding_dict = self._init_model() # generate intermediate data self.norm_adj_matrix = self.get_norm_adj_mat().to(self.device) # parameters initialization #self.apply(xavier_uniform_initialization) def _init_model(self): initializer = nn.init.xavier_uniform_ embedding_dict = nn.ParameterDict({ 'user_emb': nn.Parameter(initializer(torch.empty(self.n_users, self.latent_dim))), 'item_emb': nn.Parameter(initializer(torch.empty(self.n_items, self.latent_dim))) }) return embedding_dict def get_norm_adj_mat(self): r"""Get the normalized interaction matrix of users and items. Construct the square matrix from the training data and normalize it using the laplace matrix. .. math:: A_{hat} = D^{-0.5} \times A \times D^{-0.5} Returns: Sparse tensor of the normalized interaction matrix. """ # build adj matrix A = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32) inter_M = self.interaction_matrix inter_M_t = self.interaction_matrix.transpose() data_dict = dict(zip(zip(inter_M.row, inter_M.col+self.n_users), [1]*inter_M.nnz)) data_dict.update(dict(zip(zip(inter_M_t.row+self.n_users, inter_M_t.col), [1]*inter_M_t.nnz))) A._update(data_dict) # norm adj matrix sumArr = (A > 0).sum(axis=1) # add epsilon to avoid Devide by zero Warning diag = np.array(sumArr.flatten())[0] + 1e-7 diag = np.power(diag, -0.5) D = sp.diags(diag) L = D * A * D # covert norm_adj matrix to tensor L = sp.coo_matrix(L) row = L.row col = L.col i = torch.LongTensor([row, col]) data = torch.FloatTensor(L.data) SparseL = torch.sparse.FloatTensor(i, data, torch.Size(L.shape)) return SparseL def get_ego_embeddings(self): r"""Get the embedding of users and items and combine to an embedding matrix. Returns: Tensor of the embedding matrix. 
Shape of [n_items+n_users, embedding_dim] """ # user_embeddings = self.user_embedding.weight # item_embeddings = self.item_embedding.weight # ego_embeddings = torch.cat([user_embeddings, item_embeddings], dim=0) ego_embeddings = torch.cat([self.embedding_dict['user_emb'], self.embedding_dict['item_emb']], 0) return ego_embeddings def forward(self): all_embeddings = self.get_ego_embeddings() embeddings_list = [all_embeddings] for layer_idx in range(self.n_layers): all_embeddings = torch.sparse.mm(self.norm_adj_matrix, all_embeddings) embeddings_list.append(all_embeddings) lightgcn_all_embeddings = torch.stack(embeddings_list, dim=1) lightgcn_all_embeddings = torch.mean(lightgcn_all_embeddings, dim=1) user_all_embeddings = lightgcn_all_embeddings[:self.n_users, :] item_all_embeddings = lightgcn_all_embeddings[self.n_users:, :] return user_all_embeddings, item_all_embeddings def calculate_loss(self, interaction): user = interaction[0] pos_item = interaction[1] neg_item = interaction[2] user_all_embeddings, item_all_embeddings = self.forward() u_embeddings = user_all_embeddings[user, :] posi_embeddings = item_all_embeddings[pos_item, :] negi_embeddings = item_all_embeddings[neg_item, :] # calculate BPR Loss pos_scores = torch.mul(u_embeddings, posi_embeddings).sum(dim=1) neg_scores = torch.mul(u_embeddings, negi_embeddings).sum(dim=1) mf_loss = self.mf_loss(pos_scores, neg_scores) # calculate BPR Loss u_ego_embeddings = self.embedding_dict['user_emb'][user, :] posi_ego_embeddings = self.embedding_dict['item_emb'][pos_item, :] negi_ego_embeddings = self.embedding_dict['item_emb'][neg_item, :] reg_loss = self.reg_loss(u_ego_embeddings, posi_ego_embeddings, negi_ego_embeddings) loss = mf_loss + self.reg_weight * reg_loss return loss def full_sort_predict(self, interaction): user = interaction[0] restore_user_e, restore_item_e = self.forward() u_embeddings = restore_user_e[user, :] # dot with all item embedding to accelerate scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1)) return scores ================================================ FILE: src/models/mgcn.py ================================================ # coding: utf-8 # @email: y463213402@gmail.com r""" MGCN ################################################ Reference: https://github.com/demonph10/MGCN ACM MM'2023: [Multi-View Graph Convolutional Network for Multimedia Recommendation] """ import os import numpy as np import scipy.sparse as sp import torch import torch.nn as nn import torch.nn.functional as F from common.abstract_recommender import GeneralRecommender from utils.utils import build_sim, compute_normalized_laplacian, build_knn_neighbourhood, build_knn_normalized_graph class MGCN(GeneralRecommender): def __init__(self, config, dataset): super(MGCN, self).__init__(config, dataset) self.sparse = True self.cl_loss = config['cl_loss'] self.n_ui_layers = config['n_ui_layers'] self.embedding_dim = config['embedding_size'] self.knn_k = config['knn_k'] self.n_layers = config['n_layers'] self.reg_weight = config['reg_weight'] # load dataset info self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32) self.user_embedding = nn.Embedding(self.n_users, self.embedding_dim) self.item_id_embedding = nn.Embedding(self.n_items, self.embedding_dim) nn.init.xavier_uniform_(self.user_embedding.weight) nn.init.xavier_uniform_(self.item_id_embedding.weight) dataset_path = os.path.abspath(config['data_path'] + config['dataset']) image_adj_file = os.path.join(dataset_path, 'image_adj_{}_{}.pt'.format(self.knn_k, 
self.sparse)) text_adj_file = os.path.join(dataset_path, 'text_adj_{}_{}.pt'.format(self.knn_k, self.sparse)) self.norm_adj = self.get_adj_mat() self.R = self.sparse_mx_to_torch_sparse_tensor(self.R).float().to(self.device) self.norm_adj = self.sparse_mx_to_torch_sparse_tensor(self.norm_adj).float().to(self.device) if self.v_feat is not None: self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=False) if os.path.exists(image_adj_file): image_adj = torch.load(image_adj_file) else: image_adj = build_sim(self.image_embedding.weight.detach()) image_adj = build_knn_normalized_graph(image_adj, topk=self.knn_k, is_sparse=self.sparse, norm_type='sym') torch.save(image_adj, image_adj_file) self.image_original_adj = image_adj.cuda() if self.t_feat is not None: self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=False) if os.path.exists(text_adj_file): text_adj = torch.load(text_adj_file) else: text_adj = build_sim(self.text_embedding.weight.detach()) text_adj = build_knn_normalized_graph(text_adj, topk=self.knn_k, is_sparse=self.sparse, norm_type='sym') torch.save(text_adj, text_adj_file) self.text_original_adj = text_adj.cuda() if self.v_feat is not None: self.image_trs = nn.Linear(self.v_feat.shape[1], self.embedding_dim) if self.t_feat is not None: self.text_trs = nn.Linear(self.t_feat.shape[1], self.embedding_dim) self.softmax = nn.Softmax(dim=-1) self.query_common = nn.Sequential( nn.Linear(self.embedding_dim, self.embedding_dim), nn.Tanh(), nn.Linear(self.embedding_dim, 1, bias=False) ) self.gate_v = nn.Sequential( nn.Linear(self.embedding_dim, self.embedding_dim), nn.Sigmoid() ) self.gate_t = nn.Sequential( nn.Linear(self.embedding_dim, self.embedding_dim), nn.Sigmoid() ) self.gate_image_prefer = nn.Sequential( nn.Linear(self.embedding_dim, self.embedding_dim), nn.Sigmoid() ) self.gate_text_prefer = nn.Sequential( nn.Linear(self.embedding_dim, self.embedding_dim), nn.Sigmoid() ) self.tau = 0.5 def pre_epoch_processing(self): pass def get_adj_mat(self): adj_mat = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32) adj_mat = adj_mat.tolil() R = self.interaction_matrix.tolil() adj_mat[:self.n_users, self.n_users:] = R adj_mat[self.n_users:, :self.n_users] = R.T adj_mat = adj_mat.todok() def normalized_adj_single(adj): rowsum = np.array(adj.sum(1)) d_inv = np.power(rowsum, -0.5).flatten() d_inv[np.isinf(d_inv)] = 0. 
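# The two .dot() products just below implement the symmetric normalization
# A_hat = D^{-1/2} * A * D^{-1/2}; zero-degree rows yield inf in d_inv and are
# masked to 0 above. Note that this closure normalizes the enclosing `adj_mat`
# rather than its `adj` argument -- the two refer to the same matrix at the
# single active call site further down.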
d_mat_inv = sp.diags(d_inv) norm_adj = d_mat_inv.dot(adj_mat) norm_adj = norm_adj.dot(d_mat_inv) # norm_adj = adj.dot(d_mat_inv) # print('generate single-normalized adjacency matrix.') return norm_adj.tocoo() # norm_adj_mat = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0])) norm_adj_mat = normalized_adj_single(adj_mat) norm_adj_mat = norm_adj_mat.tolil() self.R = norm_adj_mat[:self.n_users, self.n_users:] # norm_adj_mat = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0])) return norm_adj_mat.tocsr() def sparse_mx_to_torch_sparse_tensor(self, sparse_mx): """Convert a scipy sparse matrix to a torch sparse tensor.""" sparse_mx = sparse_mx.tocoo().astype(np.float32) indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64)) values = torch.from_numpy(sparse_mx.data) shape = torch.Size(sparse_mx.shape) return torch.sparse.FloatTensor(indices, values, shape) def forward(self, adj, train=False): if self.v_feat is not None: image_feats = self.image_trs(self.image_embedding.weight) if self.t_feat is not None: text_feats = self.text_trs(self.text_embedding.weight) # Behavior-Guided Purifier image_item_embeds = torch.multiply(self.item_id_embedding.weight, self.gate_v(image_feats)) text_item_embeds = torch.multiply(self.item_id_embedding.weight, self.gate_t(text_feats)) # User-Item View item_embeds = self.item_id_embedding.weight user_embeds = self.user_embedding.weight ego_embeddings = torch.cat([user_embeds, item_embeds], dim=0) all_embeddings = [ego_embeddings] for i in range(self.n_ui_layers): side_embeddings = torch.sparse.mm(adj, ego_embeddings) ego_embeddings = side_embeddings all_embeddings += [ego_embeddings] all_embeddings = torch.stack(all_embeddings, dim=1) all_embeddings = all_embeddings.mean(dim=1, keepdim=False) content_embeds = all_embeddings # Item-Item View if self.sparse: for i in range(self.n_layers): image_item_embeds = torch.sparse.mm(self.image_original_adj, image_item_embeds) else: for i in range(self.n_layers): image_item_embeds = torch.mm(self.image_original_adj, image_item_embeds) image_user_embeds = torch.sparse.mm(self.R, image_item_embeds) image_embeds = torch.cat([image_user_embeds, image_item_embeds], dim=0) if self.sparse: for i in range(self.n_layers): text_item_embeds = torch.sparse.mm(self.text_original_adj, text_item_embeds) else: for i in range(self.n_layers): text_item_embeds = torch.mm(self.text_original_adj, text_item_embeds) text_user_embeds = torch.sparse.mm(self.R, text_item_embeds) text_embeds = torch.cat([text_user_embeds, text_item_embeds], dim=0) # Behavior-Aware Fuser att_common = torch.cat([self.query_common(image_embeds), self.query_common(text_embeds)], dim=-1) weight_common = self.softmax(att_common) common_embeds = weight_common[:, 0].unsqueeze(dim=1) * image_embeds + weight_common[:, 1].unsqueeze( dim=1) * text_embeds sep_image_embeds = image_embeds - common_embeds sep_text_embeds = text_embeds - common_embeds image_prefer = self.gate_image_prefer(content_embeds) text_prefer = self.gate_text_prefer(content_embeds) sep_image_embeds = torch.multiply(image_prefer, sep_image_embeds) sep_text_embeds = torch.multiply(text_prefer, sep_text_embeds) side_embeds = (sep_image_embeds + sep_text_embeds + common_embeds) / 3 all_embeds = content_embeds + side_embeds all_embeddings_users, all_embeddings_items = torch.split(all_embeds, [self.n_users, self.n_items], dim=0) if train: return all_embeddings_users, all_embeddings_items, side_embeds, content_embeds return all_embeddings_users, all_embeddings_items def 
bpr_loss(self, users, pos_items, neg_items): pos_scores = torch.sum(torch.mul(users, pos_items), dim=1) neg_scores = torch.sum(torch.mul(users, neg_items), dim=1) regularizer = 1. / 2 * (users ** 2).sum() + 1. / 2 * (pos_items ** 2).sum() + 1. / 2 * (neg_items ** 2).sum() regularizer = regularizer / self.batch_size maxi = F.logsigmoid(pos_scores - neg_scores) mf_loss = -torch.mean(maxi) emb_loss = self.reg_weight * regularizer reg_loss = 0.0 return mf_loss, emb_loss, reg_loss def InfoNCE(self, view1, view2, temperature): view1, view2 = F.normalize(view1, dim=1), F.normalize(view2, dim=1) pos_score = (view1 * view2).sum(dim=-1) pos_score = torch.exp(pos_score / temperature) ttl_score = torch.matmul(view1, view2.transpose(0, 1)) ttl_score = torch.exp(ttl_score / temperature).sum(dim=1) cl_loss = -torch.log(pos_score / ttl_score) return torch.mean(cl_loss) def calculate_loss(self, interaction): users = interaction[0] pos_items = interaction[1] neg_items = interaction[2] ua_embeddings, ia_embeddings, side_embeds, content_embeds = self.forward( self.norm_adj, train=True) u_g_embeddings = ua_embeddings[users] pos_i_g_embeddings = ia_embeddings[pos_items] neg_i_g_embeddings = ia_embeddings[neg_items] batch_mf_loss, batch_emb_loss, batch_reg_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings) side_embeds_users, side_embeds_items = torch.split(side_embeds, [self.n_users, self.n_items], dim=0) content_embeds_user, content_embeds_items = torch.split(content_embeds, [self.n_users, self.n_items], dim=0) cl_loss = self.InfoNCE(side_embeds_items[pos_items], content_embeds_items[pos_items], 0.2) + self.InfoNCE( side_embeds_users[users], content_embeds_user[users], 0.2) return batch_mf_loss + batch_emb_loss + batch_reg_loss + self.cl_loss * cl_loss def full_sort_predict(self, interaction): user = interaction[0] restore_user_e, restore_item_e = self.forward(self.norm_adj) u_embeddings = restore_user_e[user] # dot with all item embedding to accelerate scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1)) return scores ================================================ FILE: src/models/mmgcn.py ================================================ # coding: utf-8 """ MMGCN: Multi-modal Graph Convolution Network for Personalized Recommendation of Micro-video. 
In ACM MM`19, """ import os import numpy as np import scipy.sparse as sp import torch import torch.nn as nn import torch.nn.functional as F from torch_geometric.nn.conv import MessagePassing from torch_geometric.utils import remove_self_loops, add_self_loops, degree import torch_geometric from common.abstract_recommender import GeneralRecommender from common.loss import BPRLoss, EmbLoss from common.init import xavier_uniform_initialization class MMGCN(GeneralRecommender): def __init__(self, config, dataset): super(MMGCN, self).__init__(config, dataset) self.num_user = self.n_users self.num_item = self.n_items num_user = self.n_users num_item = self.n_items dim_x = config['embedding_size'] num_layer = config['n_layers'] batch_size = config['train_batch_size'] # not used self.aggr_mode = 'mean' self.concate = 'False' has_id = True self.weight = torch.tensor([[1.0], [-1.0]]).to(self.device) self.reg_weight = config['reg_weight'] # packing interaction in training into edge_index train_interactions = dataset.inter_matrix(form='coo').astype(np.float32) edge_index = torch.tensor(self.pack_edge_index(train_interactions), dtype=torch.long) self.edge_index = edge_index.t().contiguous().to(self.device) self.edge_index = torch.cat((self.edge_index, self.edge_index[[1, 0]]), dim=1) self.num_modal = 0 if self.v_feat is not None: self.v_gcn = GCN(self.edge_index, batch_size, num_user, num_item, self.v_feat.size(1), dim_x, self.aggr_mode, self.concate, num_layer=num_layer, has_id=has_id, dim_latent=256, device=self.device) self.num_modal += 1 if self.t_feat is not None: self.t_gcn = GCN(self.edge_index, batch_size, num_user, num_item, self.t_feat.size(1), dim_x, self.aggr_mode, self.concate, num_layer=num_layer, has_id=has_id, device=self.device) self.num_modal += 1 self.id_embedding = nn.init.xavier_normal_(torch.rand((num_user+num_item, dim_x), requires_grad=True)).to(self.device) self.result = nn.init.xavier_normal_(torch.rand((num_user + num_item, dim_x))).to(self.device) def pack_edge_index(self, inter_mat): rows = inter_mat.row cols = inter_mat.col + self.n_users # ndarray([598918, 2]) for ml-imdb return np.column_stack((rows, cols)) def forward(self): representation = None if self.v_feat is not None: representation = self.v_gcn(self.v_feat, self.id_embedding) if self.t_feat is not None: if representation is None: representation = self.t_gcn(self.t_feat, self.id_embedding) else: representation += self.t_gcn(self.t_feat, self.id_embedding) representation /= self.num_modal self.result = representation return representation def calculate_loss(self, interaction): batch_users = interaction[0] pos_items = interaction[1] + self.n_users neg_items = interaction[2] + self.n_users user_tensor = batch_users.repeat_interleave(2) stacked_items = torch.stack((pos_items, neg_items)) item_tensor = stacked_items.t().contiguous().view(-1) out = self.forward() user_score = out[user_tensor] item_score = out[item_tensor] score = torch.sum(user_score * item_score, dim=1).view(-1, 2) loss = -torch.mean(torch.log(torch.sigmoid(torch.matmul(score, self.weight)))) reg_embedding_loss = (self.id_embedding[user_tensor]**2 + self.id_embedding[item_tensor]**2).mean() if self.v_feat is not None: reg_embedding_loss += (self.v_gcn.preference**2).mean() reg_loss = self.reg_weight * reg_embedding_loss return loss + reg_loss def full_sort_predict(self, interaction): user_tensor = self.result[:self.n_users] item_tensor = self.result[self.n_users:] temp_user_tensor = user_tensor[interaction[0], :] score_matrix = 
torch.matmul(temp_user_tensor, item_tensor.t()) return score_matrix class GCN(torch.nn.Module): def __init__(self, edge_index, batch_size, num_user, num_item, dim_feat, dim_id, aggr_mode, concate, num_layer, has_id, dim_latent=None, device='cpu'): super(GCN, self).__init__() self.batch_size = batch_size self.num_user = num_user self.num_item = num_item self.dim_id = dim_id self.dim_feat = dim_feat self.dim_latent = dim_latent self.edge_index = edge_index self.aggr_mode = aggr_mode self.concate = concate self.num_layer = num_layer self.has_id = has_id self.device = device if self.dim_latent: self.preference = nn.init.xavier_normal_(torch.rand((num_user, self.dim_latent), requires_grad=True)).to(self.device) #self.preference = nn.Parameter(nn.init.xavier_normal_(torch.rand((num_user, self.dim_latent)))) self.MLP = nn.Linear(self.dim_feat, self.dim_latent) self.conv_embed_1 = BaseModel(self.dim_latent, self.dim_latent, aggr=self.aggr_mode) nn.init.xavier_normal_(self.conv_embed_1.weight) self.linear_layer1 = nn.Linear(self.dim_latent, self.dim_id) nn.init.xavier_normal_(self.linear_layer1.weight) self.g_layer1 = nn.Linear(self.dim_latent + self.dim_id, self.dim_id) if self.concate else nn.Linear( self.dim_latent, self.dim_id) nn.init.xavier_normal_(self.g_layer1.weight) else: self.preference = nn.init.xavier_normal_(torch.rand((num_user, self.dim_feat), requires_grad=True)).to(self.device) #self.preference = nn.Parameter(nn.init.xavier_normal_(torch.rand((num_user, self.dim_feat)))) self.conv_embed_1 = BaseModel(self.dim_feat, self.dim_feat, aggr=self.aggr_mode) nn.init.xavier_normal_(self.conv_embed_1.weight) self.linear_layer1 = nn.Linear(self.dim_feat, self.dim_id) nn.init.xavier_normal_(self.linear_layer1.weight) self.g_layer1 = nn.Linear(self.dim_feat + self.dim_id, self.dim_id) if self.concate else nn.Linear( self.dim_feat, self.dim_id) nn.init.xavier_normal_(self.g_layer1.weight) self.conv_embed_2 = BaseModel(self.dim_id, self.dim_id, aggr=self.aggr_mode) nn.init.xavier_normal_(self.conv_embed_2.weight) self.linear_layer2 = nn.Linear(self.dim_id, self.dim_id) nn.init.xavier_normal_(self.linear_layer2.weight) self.g_layer2 = nn.Linear(self.dim_id + self.dim_id, self.dim_id) if self.concate else nn.Linear(self.dim_id, self.dim_id) self.conv_embed_3 = BaseModel(self.dim_id, self.dim_id, aggr=self.aggr_mode) nn.init.xavier_normal_(self.conv_embed_3.weight) self.linear_layer3 = nn.Linear(self.dim_id, self.dim_id) nn.init.xavier_normal_(self.linear_layer3.weight) self.g_layer3 = nn.Linear(self.dim_id + self.dim_id, self.dim_id) if self.concate else nn.Linear(self.dim_id, self.dim_id) def forward(self, features, id_embedding): temp_features = self.MLP(features) if self.dim_latent else features x = torch.cat((self.preference, temp_features), dim=0) x = F.normalize(x) h = F.leaky_relu(self.conv_embed_1(x, self.edge_index)) # equation 1 x_hat = F.leaky_relu(self.linear_layer1(x)) + id_embedding if self.has_id else F.leaky_relu( self.linear_layer1(x)) # equation 5 x = F.leaky_relu(self.g_layer1(torch.cat((h, x_hat), dim=1))) if self.concate else F.leaky_relu( self.g_layer1(h) + x_hat) h = F.leaky_relu(self.conv_embed_2(x, self.edge_index)) # equation 1 x_hat = F.leaky_relu(self.linear_layer2(x)) + id_embedding if self.has_id else F.leaky_relu( self.linear_layer2(x)) # equation 5 x = F.leaky_relu(self.g_layer2(torch.cat((h, x_hat), dim=1))) if self.concate else F.leaky_relu( self.g_layer2(h) + x_hat) h = F.leaky_relu(self.conv_embed_3(x, self.edge_index)) # equation 1 x_hat = 
F.leaky_relu(self.linear_layer3(x)) + id_embedding if self.has_id else F.leaky_relu( self.linear_layer3(x)) # equation 5 x = F.leaky_relu(self.g_layer3(torch.cat((h, x_hat), dim=1))) if self.concate else F.leaky_relu( self.g_layer3(h) + x_hat) return x class BaseModel(MessagePassing): def __init__(self, in_channels, out_channels, normalize=True, bias=True, aggr='add', **kwargs): super(BaseModel, self).__init__(aggr=aggr, **kwargs) self.aggr = aggr self.in_channels = in_channels self.out_channels = out_channels self.normalize = normalize self.weight = nn.Parameter(torch.Tensor(self.in_channels, out_channels)) self.reset_parameters() def reset_parameters(self): torch_geometric.nn.inits.uniform(self.in_channels, self.weight) def forward(self, x, edge_index, size=None): x = torch.matmul(x, self.weight) return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x) def message(self, x_j, edge_index, size): return x_j def update(self, aggr_out): return aggr_out def __repr(self): return '{}({},{})'.format(self.__class__.__name__, self.in_channels, self.out_channels) ================================================ FILE: src/models/mvgae.py ================================================ # coding: utf-8 """ https://github.com/jing-1/MVGAE Paper: Multi-Modal Variational Graph Auto-Encoder for Recommendation Systems IEEE TMM'21 """ import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from torch_geometric.nn.conv import MessagePassing from torch_geometric.utils import remove_self_loops, add_self_loops, degree from torch_geometric.nn.inits import uniform from torch.autograd import Variable from common.abstract_recommender import GeneralRecommender from common.loss import BPRLoss, EmbLoss from common.init import xavier_uniform_initialization EPS = 1e-15 MAX_LOGVAR = 10 class MVGAE(GeneralRecommender): def __init__(self, config, dataset): super(MVGAE, self).__init__(config, dataset) self.experts = ProductOfExperts() #self.dataset = config['dataset'] self.dataset = 'amazon' self.batch_size = config['train_batch_size'] self.num_user = self.n_users self.num_item = self.n_items num_user = self.n_users num_item = self.n_items num_layer = config['n_layers'] self.aggr_mode = 'mean' self.concate = False self.dim_x = config['embedding_size'] self.beta = config['beta'] self.collaborative = nn.init.xavier_normal_(torch.rand((num_item, self.dim_x), requires_grad=True)).to(self.device) # packing interaction in training into edge_index train_interactions = dataset.inter_matrix(form='coo').astype(np.float32) edge_index = torch.tensor(self.pack_edge_index(train_interactions), dtype=torch.long) self.edge_index = edge_index.t().contiguous().to(self.device) self.edge_index = torch.cat((self.edge_index, self.edge_index[[1, 0]]), dim=1) if self.v_feat is not None: self.v_gcn = GCN(self.device, self.v_feat, self.edge_index, self.batch_size, num_user, num_item, self.dim_x, self.aggr_mode, self.concate, num_layer=num_layer, dim_latent=128) # 256) if self.t_feat is not None: self.t_gcn = GCN(self.device, self.t_feat, self.edge_index, self.batch_size, num_user, num_item, self.dim_x, self.aggr_mode, self.concate, num_layer=num_layer, dim_latent=128) # 256) self.c_gcn = GCN(self.device, self.collaborative, self.edge_index, self.batch_size, num_user, num_item, self.dim_x, self.aggr_mode, self.concate, num_layer=num_layer, dim_latent=128) # 256) self.result_embed = nn.init.xavier_normal_(torch.rand((num_user + num_item, self.dim_x))).to(self.device) def 
pack_edge_index(self, inter_mat): rows = inter_mat.row cols = inter_mat.col + self.n_users # ndarray([598918, 2]) for ml-imdb return np.column_stack((rows, cols)) def reparametrize(self, mu, logvar): logvar = logvar.clamp(max=MAX_LOGVAR) if self.training: return mu + torch.randn_like(logvar) * 0.1 * torch.exp(logvar.mul(0.5)) else: return mu def dot_product_decode_neg(self, z, user, neg_items, sigmoid=True): # multiple negs, for comparison with MAML # print('user shape: ',user,user.shape) users = torch.unsqueeze(user, 1) # print('users shape: ', users,users.shape) neg_items = neg_items # print('neg_items: ', neg_items,neg_items.shape) # print('neg_items.size(1):', neg_items.size(0)) re_users = users.repeat(1, neg_items.size(0)) neg_values = torch.sum(z[re_users] * z[neg_items], -1) max_neg_value = torch.max(neg_values, dim=-1).values return torch.sigmoid(max_neg_value) if sigmoid else max_neg_value def dot_product_decode(self, z, edge_index, sigmoid=True): value = torch.sum(z[edge_index[0]] * z[edge_index[1]], dim=1) return torch.sigmoid(value) if sigmoid else value def forward(self): v_mu, v_logvar = self.v_gcn() t_mu, t_logvar = self.t_gcn() c_mu, c_logvar = self.c_gcn() self.v_logvar = v_logvar self.t_logvar = t_logvar self.v_mu = v_mu self.t_mu = t_mu mu = torch.stack([v_mu, t_mu], dim=0) logvar = torch.stack([v_logvar, t_logvar], dim=0) pd_mu, pd_logvar, _ = self.experts(mu, logvar) del mu del logvar mu = torch.stack([pd_mu, c_mu], dim=0) logvar = torch.stack([pd_logvar, c_logvar], dim=0) pd_mu, pd_logvar, _ = self.experts(mu, logvar) del mu del logvar z = self.reparametrize(pd_mu, pd_logvar) # for more sparse dataset like amazon, use signoid to regulization. for alishop,dont use sigmoid for better results if 'amazon' in self.dataset: self.result_embed = torch.sigmoid(pd_mu) else: self.result_embed = pd_mu return pd_mu, pd_logvar, z, v_mu, v_logvar, t_mu, t_logvar, c_mu, c_logvar def recon_loss(self, z, pos_edge_index, user, neg_items): r"""Given latent variables :obj:`z`, computes the binary cross entropy loss for positive edges :obj:`pos_edge_index` and negative sampled edges. Args: z (Tensor): The latent space :math:`\mathbf{Z}`. pos_edge_index (LongTensor): The positive edges to train against. """ # for more sparse dataset like amazon, use signoid to regulization. for alishop,dont use sigmoid for better results if 'amazon' in self.dataset: z = torch.sigmoid(z) pos_scores = self.dot_product_decode(z, pos_edge_index, sigmoid=True) neg_scores = self.dot_product_decode_neg(z, user, neg_items, sigmoid=True) loss = -torch.sum(torch.log2(torch.sigmoid(pos_scores - neg_scores))) return loss def kl_loss(self, mu, logvar): r"""Computes the KL loss, either for the passed arguments :obj:`mu` and :obj:`logvar`, or based on latent variables from last encoding. Args: mu (Tensor, optional): The latent space for :math:`\mu`. If set to :obj:`None`, uses the last computation of :math:`mu`. (default: :obj:`None`) logvar (Tensor, optional): The latent space for :math:`\log\sigma^2`. 
If set to :obj:`None`, uses the last computation of :math:`\log\sigma^2`.(default: :obj:`None`) """ logvar = logvar.clamp(max=MAX_LOGVAR) return -0.5 * torch.mean( torch.sum(1 + logvar - mu ** 2 - logvar.exp(), dim=1)) def calculate_loss(self, interaction): user = interaction[0] pos_items = interaction[1] neg_items = interaction[2] #user = user.long() #pos_items = pos_items.long() #neg_items = torch.tensor(neg_items, dtype=torch.long) pos_edge_index = torch.stack([user, pos_items], dim=0) pd_mu, pd_logvar, z, v_mu, v_logvar, t_mu, t_logvar, c_mu, c_logvar = self.forward() z_v = self.reparametrize(v_mu, v_logvar) z_t = self.reparametrize(t_mu, t_logvar) z_c = self.reparametrize(c_mu, c_logvar) recon_loss = self.recon_loss(z, pos_edge_index, user, neg_items) kl_loss = self.kl_loss(pd_mu, pd_logvar) loss_multi = recon_loss + self.beta * kl_loss loss_v = self.recon_loss(z_v, pos_edge_index, user, neg_items) + self.beta * self.kl_loss(v_mu, v_logvar) loss_t = self.recon_loss(z_t, pos_edge_index, user, neg_items) + self.beta * self.kl_loss(t_mu, t_logvar) loss_c = self.recon_loss(z_c, pos_edge_index, user, neg_items) + self.beta* self.kl_loss(c_mu, c_logvar) return loss_multi + loss_v + loss_t + loss_c def full_sort_predict(self, interaction): user_tensor = self.result_embed[:self.n_users] item_tensor = self.result_embed[self.n_users:] temp_user_tensor = user_tensor[interaction[0], :] score_matrix = torch.matmul(temp_user_tensor, item_tensor.t()) return score_matrix class GCN(torch.nn.Module): def __init__(self, device, features, edge_index, batch_size, num_user, num_item, dim_id, aggr_mode, concate, num_layer, dim_latent=None): super(GCN, self).__init__() self.device = device self.batch_size = batch_size self.num_user = num_user self.num_item = num_item self.dim_id = dim_id self.dim_feat = features.size(1) self.dim_latent = dim_latent self.edge_index = edge_index self.features = features self.aggr_mode = aggr_mode self.concate = concate self.num_layer = num_layer if self.dim_latent: self.preference = nn.init.xavier_normal_(torch.rand((num_user, self.dim_latent), requires_grad=True)).to( self.device) self.MLP = nn.Linear(self.dim_feat, self.dim_latent) nn.init.xavier_normal_(self.MLP.weight) self.conv_embed_1 = BaseModel(self.dim_latent, self.dim_id, aggr=self.aggr_mode) nn.init.xavier_normal_(self.conv_embed_1.weight) self.linear_layer1 = nn.Linear(self.dim_latent, self.dim_id) nn.init.xavier_normal_(self.linear_layer1.weight) self.g_layer1 = nn.Linear(self.dim_id + self.dim_id, self.dim_id) if self.concate else nn.Linear( self.dim_id, self.dim_id) nn.init.xavier_normal_(self.g_layer1.weight) else: self.preference = nn.init.xavier_normal_(torch.rand((num_user, self.dim_feat), requires_grad=True)).to( self.device) self.conv_embed_1 = BaseModel(self.dim_feat, self.dim_id, aggr=self.aggr_mode) nn.init.xavier_normal_(self.conv_embed_1.weight) self.linear_layer1 = nn.Linear(self.dim_feat, self.dim_id) nn.init.xavier_normal_(self.linear_layer1.weight) self.g_layer1 = nn.Linear(self.dim_feat + self.dim_id, self.dim_id) if self.concate else nn.Linear( self.dim_id, self.dim_id) nn.init.xavier_normal_(self.g_layer1.weight) self.conv_embed_2 = BaseModel(self.dim_id, self.dim_id, aggr=self.aggr_mode) nn.init.xavier_normal_(self.conv_embed_2.weight) self.linear_layer2 = nn.Linear(self.dim_id, self.dim_id) nn.init.xavier_normal_(self.linear_layer2.weight) self.g_layer2 = nn.Linear(self.dim_id + self.dim_id, self.dim_id) if self.concate else nn.Linear(self.dim_id, self.dim_id) # 
nn.init.xavier_normal_(self.g_layer2.weight) self.conv_embed_4 = BaseModel(self.dim_id, self.dim_id, aggr=self.aggr_mode) nn.init.xavier_normal_(self.conv_embed_4.weight) self.linear_layer4 = nn.Linear(self.dim_id, self.dim_id) nn.init.xavier_normal_(self.linear_layer4.weight) self.g_layer4 = nn.Linear(self.dim_id + self.dim_id, self.dim_id) if self.concate else nn.Linear(self.dim_id, self.dim_id) nn.init.xavier_normal_(self.g_layer4.weight) self.conv_embed_5 = BaseModel(self.dim_id, self.dim_id, aggr=self.aggr_mode) nn.init.xavier_normal_(self.conv_embed_5.weight) self.linear_layer5 = nn.Linear(self.dim_id, self.dim_id) nn.init.xavier_normal_(self.linear_layer5.weight) self.g_layer5 = nn.Linear(self.dim_id + self.dim_id, self.dim_id) if self.concate else nn.Linear(self.dim_id, self.dim_id) nn.init.xavier_normal_(self.g_layer5.weight) def forward(self): # print(self.features) # print(self.MLP.weight) temp_features = self.MLP(self.features) if self.dim_latent else self.features # print('temp feature: ',temp_features) x = torch.cat((self.preference, temp_features), dim=0) # print(x) x = F.normalize(x).to(self.device) # print(x) if self.num_layer > 0: h = F.leaky_relu(self.conv_embed_1(x, self.edge_index)) x_hat = F.leaky_relu(self.linear_layer1(x)) x = F.leaky_relu(self.g_layer1(torch.cat((h, x_hat), dim=1))) if self.concate else F.leaky_relu( self.g_layer1(h)) del x_hat del h if self.num_layer > 1: h = F.leaky_relu(self.conv_embed_2(x, self.edge_index)) x_hat = F.leaky_relu(self.linear_layer2(x)) x = F.leaky_relu(self.g_layer2(torch.cat((h, x_hat), dim=1))) if self.concate else F.leaky_relu( self.g_layer2(h)) del h del x_hat mu = F.leaky_relu(self.conv_embed_4(x, self.edge_index)) x_hat = F.leaky_relu(self.linear_layer4(x)) mu = self.g_layer4(torch.cat((mu, x_hat), dim=1)) if self.concate else self.g_layer4(mu) + x_hat del x_hat logvar = F.leaky_relu(self.conv_embed_5(x, self.edge_index)) x_hat = F.leaky_relu(self.linear_layer5(x)) logvar = self.g_layer5(torch.cat((logvar, x_hat), dim=1)) if self.concate else self.g_layer5(logvar) + x_hat del x_hat return mu, logvar class ProductOfExperts(torch.nn.Module): def __init__(self): super(ProductOfExperts, self).__init__() """Return parameters for product of independent experts. See https://arxiv.org/pdf/1410.7827.pdf for equations. @param mu: M x D for M experts @param logvar: M x D for M experts """ def forward(self, mu, logvar, eps=1e-8): var = torch.exp(logvar) + eps # precision of i-th Gaussian expert at point x T = 1. / var pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0) pd_var = 1. 
/ torch.sum(T, dim=0) pd_logvar = torch.log(pd_var) return pd_mu, pd_logvar, pd_var class BaseModel(MessagePassing): def __init__(self, in_channels, out_channels, normalize=True, bias=True, aggr='add', **kwargs): super(BaseModel, self).__init__(aggr=aggr, **kwargs) self.aggr = aggr self.in_channels = in_channels self.out_channels = out_channels self.normalize = normalize self.weight = Parameter(torch.Tensor(self.in_channels, out_channels)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): uniform(self.in_channels, self.weight) uniform(self.in_channels, self.bias) def forward(self, x, edge_index, size=None): if size is None: edge_index, _ = remove_self_loops(edge_index) edge_index, _ = add_self_loops(edge_index.long(), num_nodes=x.size(0)) edge_index = edge_index.long() x = x.unsqueeze(-1) if x.dim() == 1 else x x = torch.matmul(x, self.weight) return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x) def message(self, x_j, edge_index, size): if self.aggr == 'add': row, col = edge_index deg = degree(row, size[0], dtype=x_j.dtype) deg_inv_sqrt = deg.pow(-0.5) norm = deg_inv_sqrt[row] * deg_inv_sqrt[col] return norm.view(-1, 1) * x_j return x_j def update(self, aggr_out): if self.bias is not None: aggr_out = aggr_out + self.bias if self.normalize: aggr_out = F.normalize(aggr_out, p=2, dim=-1) return F.dropout(aggr_out, p=0.1, training=self.training) def __repr(self): return '{}({},{})'.format(self.__class__.__name__, self.in_channels, self.out_channels) ================================================ FILE: src/models/pgl.py ================================================ # coding: utf-8 # @email: y463213402@gmail.com r""" PGL ################################################ Reference: https://github.com/demonph10/PGL AAAI'2025: [Mind Individual Information! 
Principal Graph Learning for Multimedia Recommendation] """ import os import numpy as np import scipy.sparse as sp import torch import torch.nn as nn import torch.nn.functional as F from common.abstract_recommender import GeneralRecommender from sparsesvd import sparsesvd class PGL(GeneralRecommender): def __init__(self, config, dataset): super(PGL, self).__init__(config, dataset) self.mode = config['mode'] self.embedding_dim = config['embedding_size'] self.feat_embed_dim = config['feat_embed_dim'] self.knn_k = config['knn_k'] self.lambda_coeff = config['lambda_coeff'] self.n_layers = config['n_mm_layers'] self.n_ui_layers = config['n_ui_layers'] self.reg_weight = config['reg_weight'] self.mm_image_weight = config['mm_image_weight'] self.n_nodes = self.n_users + self.n_items self.sub_graph, self.mm_adj = None, None # load dataset info self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32) self.norm_adj = self.get_norm_adj_mat().to(self.device) self.edge_indices, self.edge_values = self.get_edge_info() self.edge_indices, self.edge_values = self.edge_indices.to(self.device), self.edge_values.to(self.device) self.edge_full_indices = torch.arange(self.edge_values.size(0)).to(self.device) self.user_text = nn.Embedding(self.n_users, self.embedding_dim) self.user_image = nn.Embedding(self.n_users, self.embedding_dim) nn.init.xavier_uniform_(self.user_image.weight) nn.init.xavier_uniform_(self.user_text.weight) dataset_path = os.path.abspath(config['data_path'] + config['dataset']) mm_adj_file = os.path.join(dataset_path,'mm_adj_freedomdsp_{}_{}.pt'.format(self.knn_k, int(10 * self.mm_image_weight))) if self.v_feat is not None: self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=False) self.image_trs = nn.Linear(self.v_feat.shape[1], self.feat_embed_dim) if self.t_feat is not None: self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=False) self.text_trs = nn.Linear(self.t_feat.shape[1], self.feat_embed_dim) if os.path.exists(mm_adj_file): self.mm_adj = torch.load(mm_adj_file) else: if self.v_feat is not None: indices, image_adj = self.get_knn_adj_mat(self.image_embedding.weight.detach()) self.mm_adj = image_adj if self.t_feat is not None: indices, text_adj = self.get_knn_adj_mat(self.text_embedding.weight.detach()) self.mm_adj = text_adj if self.v_feat is not None and self.t_feat is not None: self.mm_adj = self.mm_image_weight * image_adj + (1.0 - self.mm_image_weight) * text_adj del text_adj del image_adj torch.save(self.mm_adj, mm_adj_file) self.dropoutf = nn.Dropout(config['dropout']) def sparse_mx_to_torch_sparse_tensor(self, sparse_mx): """Convert a scipy sparse matrix to a torch sparse tensor.""" sparse_mx = sparse_mx.tocoo().astype(np.float32) indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64)) values = torch.from_numpy(sparse_mx.data) shape = torch.Size(sparse_mx.shape) return torch.sparse.FloatTensor(indices, values, shape) def get_knn_adj_mat(self, mm_embeddings): context_norm = mm_embeddings.div(torch.norm(mm_embeddings, p=2, dim=-1, keepdim=True)) sim = torch.mm(context_norm, context_norm.transpose(1, 0)) _, knn_ind = torch.topk(sim, self.knn_k, dim=-1) adj_size = sim.size() del sim # construct sparse adj indices0 = torch.arange(knn_ind.shape[0]).to(self.device) indices0 = torch.unsqueeze(indices0, 1) indices0 = indices0.expand(-1, self.knn_k) indices = torch.stack((torch.flatten(indices0), torch.flatten(knn_ind)), 0) # norm return indices, self.compute_normalized_laplacian(indices, 
adj_size) def compute_normalized_laplacian(self, indices, adj_size): adj = torch.sparse.FloatTensor(indices, torch.ones_like(indices[0]), adj_size) row_sum = 1e-7 + torch.sparse.sum(adj, -1).to_dense() r_inv_sqrt = torch.pow(row_sum, -0.5) rows_inv_sqrt = r_inv_sqrt[indices[0]] cols_inv_sqrt = r_inv_sqrt[indices[1]] values = rows_inv_sqrt * cols_inv_sqrt return torch.sparse.FloatTensor(indices, values, adj_size) def get_norm_adj_mat(self): A = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32) inter_M = self.interaction_matrix inter_M_t = self.interaction_matrix.transpose() data_dict = dict(zip(zip(inter_M.row, inter_M.col + self.n_users), [1] * inter_M.nnz)) data_dict.update(dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col), [1] * inter_M_t.nnz))) A._update(data_dict) # norm adj matrix sumArr = (A > 0).sum(axis=1) # add epsilon to avoid Devide by zero Warning diag = np.array(sumArr.flatten())[0] + 1e-7 diag = np.power(diag, -0.5) D = sp.diags(diag) L = D * A * D # covert norm_adj matrix to tensor L = sp.coo_matrix(L) row = L.row col = L.col i = torch.LongTensor(np.array([row, col])) data = torch.FloatTensor(L.data) if self.mode == 'global': self.sub_graph = self.global_subgraph_extraction(L) self.sub_graph = self.sparse_mx_to_torch_sparse_tensor(self.sub_graph).to(self.device) return torch.sparse.FloatTensor(i, data, torch.Size((self.n_nodes, self.n_nodes))) def global_subgraph_extraction(self, adj): norm_adj = adj.tocsc() ut, s, vt = sparsesvd(norm_adj, self.embedding_dim) # Get the top and bottom 25% of singular values num_top_bottom = int(0.25 * self.embedding_dim) top_singular_values = s[:num_top_bottom] bottom_singular_values = s[-num_top_bottom:] # Compute the product of the top and bottom singular values product_singular_values = top_singular_values * bottom_singular_values # Construct the sparse matrix from the product of singular values product_matrix = np.diag(product_singular_values) product_sparse_matrix = ut.T[:, :num_top_bottom] @ product_matrix @ vt[:num_top_bottom, :] product_sparse_matrix = sp.csr_matrix(product_sparse_matrix * (abs(product_sparse_matrix) >= 1e-3)) return product_sparse_matrix def alignment(self, x, y): user, item = self.interaction_matrix.nonzero() x, y = F.normalize(x, dim=-1), F.normalize(y, dim=-1) return (x[user] - y[item]).norm(p=2, dim=1).pow(2).mean() def uniformity(self, x, t=2): x = F.normalize(x, dim=-1) return torch.pdist(x, p=2).pow(2).mul(-t).exp().mean().log() def save(self): pass def pre_epoch_processing(self): if self.mode == 'local': # degree-sensitive edge pruning degree_len = int(self.edge_values.size(0) * 0.3) degree_idx = torch.multinomial(self.edge_values, degree_len) # random sample keep_indices = self.edge_indices[:, degree_idx] # norm values keep_values = self._normalize_adj_m(keep_indices, torch.Size((self.n_users, self.n_items))) all_values = torch.cat((keep_values, keep_values)) # update keep_indices to users/items+self.n_users keep_indices[1] += self.n_users all_indices = torch.cat((keep_indices, torch.flip(keep_indices, [0])), 1) self.sub_graph = torch.sparse.FloatTensor(all_indices, all_values, self.norm_adj.shape).to(self.device) def _normalize_adj_m(self, indices, adj_size): adj = torch.sparse.FloatTensor(indices, torch.ones_like(indices[0]), adj_size) row_sum = 1e-7 + torch.sparse.sum(adj, -1).to_dense() col_sum = 1e-7 + torch.sparse.sum(adj.t(), -1).to_dense() r_inv_sqrt = torch.pow(row_sum, -0.5) rows_inv_sqrt = r_inv_sqrt[indices[0]] c_inv_sqrt = torch.pow(col_sum, 
-0.5) cols_inv_sqrt = c_inv_sqrt[indices[1]] values = rows_inv_sqrt * cols_inv_sqrt return values def get_edge_info(self): rows = torch.from_numpy(self.interaction_matrix.row) cols = torch.from_numpy(self.interaction_matrix.col) edges = torch.stack([rows, cols]).type(torch.LongTensor) # edge normalized values values = self._normalize_adj_m(edges, torch.Size((self.n_users, self.n_items))) return edges, values def forward(self, adj): if self.v_feat is not None: image_feats = self.image_trs(self.image_embedding.weight) if self.t_feat is not None: text_feats = self.text_trs(self.text_embedding.weight) image_feats, text_feats = F.normalize(image_feats), F.normalize(text_feats) user_embeds = torch.cat([self.user_image.weight, self.user_text.weight], dim=1) item_embeds = torch.cat([image_feats, text_feats], dim=1) h = item_embeds for i in range(self.n_layers): h = torch.sparse.mm(self.mm_adj, h) ego_embeddings = torch.cat((user_embeds, item_embeds), dim=0) all_embeddings = [ego_embeddings] for i in range(self.n_ui_layers): side_embeddings = torch.sparse.mm(adj, ego_embeddings) ego_embeddings = side_embeddings all_embeddings += [ego_embeddings] all_embeddings = torch.stack(all_embeddings, dim=1) all_embeddings = all_embeddings.mean(dim=1, keepdim=False) u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0) return u_g_embeddings, i_g_embeddings + h def bpr_loss(self, users, pos_items, neg_items): pos_scores = torch.sum(torch.mul(users, pos_items), dim=1) neg_scores = torch.sum(torch.mul(users, neg_items), dim=1) maxi = F.logsigmoid(pos_scores - neg_scores) mf_loss = -torch.mean(maxi) return mf_loss def InfoNCE(self, view1, view2, temperature): view1, view2 = F.normalize(view1, dim=1), F.normalize(view2, dim=1) pos_score = (view1 * view2).sum(dim=-1) pos_score = torch.exp(pos_score / temperature) ttl_score = torch.matmul(view1, view2.transpose(0, 1)) ttl_score = torch.exp(ttl_score / temperature).sum(dim=1) cl_loss = -torch.log(pos_score / ttl_score) return torch.mean(cl_loss) def calculate_loss(self, interaction): users = interaction[0] pos_items = interaction[1] neg_items = interaction[2] ua_embeddings, ia_embeddings = self.forward(self.sub_graph) u_g_embeddings = ua_embeddings[users] pos_i_g_embeddings = ia_embeddings[pos_items] neg_i_g_embeddings = ia_embeddings[neg_items] batch_mf_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings) cl_loss = (self.InfoNCE(self.dropoutf(u_g_embeddings), self.dropoutf(u_g_embeddings), 0.2) + self.InfoNCE(self.dropoutf(pos_i_g_embeddings), self.dropoutf(pos_i_g_embeddings), 0.2)) / 2 return batch_mf_loss + self.reg_weight * cl_loss def full_sort_predict(self, interaction): user = interaction[0] restore_user_e, restore_item_e = self.forward(self.norm_adj) u_embeddings = restore_user_e[user] # dot with all item embedding to accelerate scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1)) return scores ================================================ FILE: src/models/selfcfed_lgn.py ================================================ # -*- coding: utf-8 -*- # @Time : 2021/05/17 # @Author : Zhou xin # @Email : enoche.chow@gmail.com r""" ################################################ Self-supervised CF Using the same implementation of LightGCN in BUIR Adding regularization on embeddings SELFCF_{ed}: embedding dropout """ import scipy.sparse as sp import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from common.encoders import LightGCN_Encoder from 
common.abstract_recommender import GeneralRecommender from common.loss import BPRLoss, EmbLoss, L2Loss class SELFCFED_LGN(GeneralRecommender): def __init__(self, config, dataset): super(SELFCFED_LGN, self).__init__(config, dataset) self.user_count = self.n_users self.item_count = self.n_items self.latent_size = config['embedding_size'] self.dropout = config['dropout'] self.reg_weight = config['reg_weight'] self.online_encoder = LightGCN_Encoder(config, dataset) self.predictor = nn.Linear(self.latent_size, self.latent_size) self.reg_loss = L2Loss() def forward(self, inputs): u_online, i_online = self.online_encoder(inputs) with torch.no_grad(): u_target, i_target = u_online.clone(), i_online.clone() u_target.detach() i_target.detach() u_target = F.dropout(u_target, self.dropout) i_target = F.dropout(i_target, self.dropout) return u_online, u_target, i_online, i_target @torch.no_grad() def get_embedding(self): u_online, i_online = self.online_encoder.get_embedding() return self.predictor(u_online), u_online, self.predictor(i_online), i_online def loss_fn(self, p, z): # negative cosine similarity return - F.cosine_similarity(p, z.detach(), dim=-1).mean() def calculate_loss(self, interaction): u_online, u_target, i_online, i_target = self.forward(interaction) reg_loss = self.reg_loss(u_online, i_online) u_online, i_online = self.predictor(u_online), self.predictor(i_online) loss_ui = self.loss_fn(u_online, i_target)/2 loss_iu = self.loss_fn(i_online, u_target)/2 return loss_ui + loss_iu + self.reg_weight * reg_loss def full_sort_predict(self, interaction): user = interaction[0] u_online, u_target, i_online, i_target = self.get_embedding() score_mat_ui = torch.matmul(u_online[user], i_target.transpose(0, 1)) score_mat_iu = torch.matmul(u_target[user], i_online.transpose(0, 1)) scores = score_mat_ui + score_mat_iu return scores ================================================ FILE: src/models/slmrec.py ================================================ # coding: utf-8 # # Updated by enoche # Paper: Self-supervised Learning for Multimedia Recommendation # Github: https://github.com/zltao/SLMRec # import torch from torch import nn import numpy as np import scipy.sparse as sp from torch_scatter import scatter from sklearn.cluster import KMeans from common.abstract_recommender import GeneralRecommender ## Only visual + text features ## class SLMRec(GeneralRecommender): def __init__(self, config, dataset): super(SLMRec, self).__init__(config, dataset) self.a_feat = None # no audio feature self.config = config self.infonce_criterion = nn.CrossEntropyLoss() self.__init_weight(dataset) def __init_weight(self, dataset): self.num_users = self.n_users self.num_items = self.n_items self.latent_dim = self.config['recdim'] self.n_layers = self.config['layer_num'] self.mm_fusion_mode = self.config['mm_fusion_mode'] self.temp = self.config['temp'] self.create_u_embeding_i() self.all_items = self.all_users = None train_interactions = dataset.inter_matrix(form='csr').astype(np.float32) coo = self.create_adj_mat(train_interactions).tocoo() indices = torch.LongTensor([coo.row.tolist(), coo.col.tolist()]) self.norm_adj = torch.sparse.FloatTensor(indices, torch.FloatTensor(coo.data), coo.shape) self.norm_adj = self.norm_adj.to(self.device) self.f = nn.Sigmoid() if self.config["ssl_task"] == "FAC": # Fine and Coarse self.g_i_iv = nn.Linear(self.latent_dim, self.latent_dim) self.g_v_iv = nn.Linear(self.latent_dim, self.latent_dim) self.g_iv_iva = nn.Linear(self.latent_dim, self.latent_dim) self.g_a_iva = 
nn.Linear(self.latent_dim, self.latent_dim) self.g_iva_ivat = nn.Linear(self.latent_dim, self.latent_dim // 2) self.g_t_ivat = nn.Linear(self.latent_dim, self.latent_dim // 2) nn.init.xavier_uniform_(self.g_i_iv.weight) nn.init.xavier_uniform_(self.g_v_iv.weight) nn.init.xavier_uniform_(self.g_iv_iva.weight) nn.init.xavier_uniform_(self.g_a_iva.weight) nn.init.xavier_uniform_(self.g_iva_ivat.weight) nn.init.xavier_uniform_(self.g_t_ivat.weight) self.ssl_temp = self.config["ssl_temp"] elif self.config["ssl_task"] in ["FD", "FD+FM"]: # Feature dropout self.ssl_criterion = nn.CrossEntropyLoss() self.ssl_temp = self.config["ssl_temp"] self.dropout_rate = self.config["dropout_rate"] self.dropout = nn.Dropout(p=self.dropout_rate) elif self.config["ssl_task"] == "FM": # Feature Masking self.ssl_criterion = nn.CrossEntropyLoss() self.ssl_temp = self.config["ssl_temp"] def compute(self): users_emb = self.embedding_user.weight items_emb = self.embedding_item.weight if self.v_feat is not None: self.v_dense_emb = self.v_dense(self.v_feat) # v=>id if self.config["dataset"] != "kwai": if self.a_feat is not None: self.a_dense_emb = self.a_dense(self.a_feat) # a=>id if self.t_feat is not None: self.t_dense_emb = self.t_dense(self.t_feat) # t=>id def compute_graph(u_emb, i_emb): all_emb = torch.cat([u_emb, i_emb]) embs = [all_emb] g_droped = self.norm_adj for _ in range(self.n_layers): all_emb = torch.sparse.mm(g_droped, all_emb) embs.append(all_emb) embs = torch.stack(embs, dim=1) light_out = torch.mean(embs, dim=1) return light_out self.i_emb = compute_graph(users_emb, items_emb) self.i_emb_u, self.i_emb_i = torch.split(self.i_emb, [self.num_users, self.num_items]) self.v_emb = compute_graph(users_emb, self.v_dense_emb) self.v_emb_u, self.v_emb_i = torch.split(self.v_emb, [self.num_users, self.num_items]) if self.config["dataset"] != "kwai": if self.a_feat is not None: self.a_emb = compute_graph(users_emb, self.a_dense_emb) self.a_emb_u, self.a_emb_i = torch.split(self.a_emb, [self.num_users, self.num_items]) if self.t_feat is not None: self.t_emb = compute_graph(users_emb, self.t_dense_emb) self.t_emb_u, self.t_emb_i = torch.split(self.t_emb, [self.num_users, self.num_items]) # multi - modal features fusion if self.config["dataset"] == "kwai": user = self.embedding_user_after_GCN( self.mm_fusion([self.i_emb_u, self.v_emb_u])) item = self.embedding_item_after_GCN( self.mm_fusion([self.i_emb_i, self.v_emb_i])) else: user = self.embedding_user_after_GCN(self.mm_fusion([self.i_emb_u, self.v_emb_u, self.t_emb_u])) item = self.embedding_item_after_GCN(self.mm_fusion([self.i_emb_i, self.v_emb_i, self.t_emb_i])) return user, item def feature_dropout(self, users_idx, items_idx): users_emb = self.embedding_user.weight items_emb = self.embedding_item.weight v_dense = self.v_dense_emb if self.config["data.input.dataset"] != "kwai": a_dense = self.a_dense_emb t_dense = self.t_dense_emb def compute_graph(u_emb, i_emb): all_emb = torch.cat([u_emb, i_emb]) ego_emb_sub_1 = all_emb ego_emb_sub_2 = all_emb # embs = [all_emb] embs_sub_1 = [ego_emb_sub_1] embs_sub_2 = [ego_emb_sub_2] g_droped = self.norm_adj for _ in range(self.n_layers): ego_emb_sub_1 = self.dropout(torch.sparse.mm(g_droped, ego_emb_sub_1)) ego_emb_sub_2 = self.dropout(torch.sparse.mm(g_droped, ego_emb_sub_2)) embs_sub_2.append(ego_emb_sub_1) embs_sub_1.append(ego_emb_sub_2) embs_sub_1 = torch.stack(embs_sub_1, dim=1) embs_sub_2 = torch.stack(embs_sub_2, dim=1) light_out_sub_1 = torch.mean(embs_sub_1, dim=1) light_out_sub_2 = torch.mean(embs_sub_2, dim=1) 
users_sub_1, items_sub_1 = torch.split(light_out_sub_1, [self.num_users, self.num_items]) users_sub_2, items_sub_2 = torch.split(light_out_sub_2, [self.num_users, self.num_items]) return users_sub_1[users_idx], items_sub_1[items_idx], users_sub_2[users_idx], items_sub_2[items_idx] i_emb_u_sub_1, i_emb_i_sub_1, i_emb_u_sub_2, i_emb_i_sub_2 = compute_graph(users_emb, items_emb) v_emb_u_sub_1, v_emb_i_sub_1, v_emb_u_sub_2, v_emb_i_sub_2 = compute_graph(users_emb, v_dense) if self.config["data.input.dataset"] != "kwai": a_emb_u_sub_1, a_emb_i_sub_1, a_emb_u_sub_2, a_emb_i_sub_2 = compute_graph(users_emb, a_dense) t_emb_u_sub_1, t_emb_i_sub_1, t_emb_u_sub_2, t_emb_i_sub_2 = compute_graph(users_emb, t_dense) if self.config["data.input.dataset"] == "kwai": users_sub_1 = self.embedding_user_after_GCN(self.mm_fusion([i_emb_u_sub_1, v_emb_u_sub_1])) items_sub_1 = self.embedding_item_after_GCN(self.mm_fusion([i_emb_i_sub_1, v_emb_i_sub_1])) users_sub_2 = self.embedding_user_after_GCN(self.mm_fusion([i_emb_u_sub_2, v_emb_u_sub_2])) items_sub_2 = self.embedding_item_after_GCN(self.mm_fusion([i_emb_i_sub_2, v_emb_i_sub_2])) else: users_sub_1 = self.embedding_user_after_GCN( self.mm_fusion([i_emb_u_sub_1, v_emb_u_sub_1, a_emb_u_sub_1, t_emb_u_sub_1])) items_sub_1 = self.embedding_item_after_GCN( self.mm_fusion([i_emb_i_sub_1, v_emb_i_sub_1, a_emb_i_sub_1, t_emb_i_sub_1])) users_sub_2 = self.embedding_user_after_GCN( self.mm_fusion([i_emb_u_sub_2, v_emb_u_sub_2, a_emb_u_sub_2, t_emb_u_sub_2])) items_sub_2 = self.embedding_item_after_GCN( self.mm_fusion([i_emb_i_sub_2, v_emb_i_sub_2, a_emb_i_sub_2, t_emb_i_sub_2])) users_sub_1 = torch.nn.functional.normalize(users_sub_1, dim=1) users_sub_2 = torch.nn.functional.normalize(users_sub_2, dim=1) items_sub_1 = torch.nn.functional.normalize(items_sub_1, dim=1) items_sub_2 = torch.nn.functional.normalize(items_sub_2, dim=1) logits_user = torch.mm(users_sub_1, users_sub_2.T) logits_user /= self.ssl_temp labels_user = torch.tensor(list(range(users_sub_2.shape[0]))).to(self.device) ssl_loss_user = self.ssl_criterion(logits_user, labels_user) logits_item = torch.mm(items_sub_1, items_sub_2.T) logits_item /= self.ssl_temp labels_item = torch.tensor(list(range(items_sub_2.shape[0]))).to(self.device) ssl_loss_item = self.ssl_criterion(logits_item, labels_item) return ssl_loss_user + ssl_loss_item def feature_masking(self, users_idx, items_idx, dropout=False): users_emb = self.embedding_user.weight items_emb = self.embedding_item.weight rand_range = 4 if self.config["data.input.dataset"] != "kwai" else 2 rand_idx1 = np.random.randint(rand_range) rand_idx2 = 0 while True: rand_idx2 = np.random.randint(rand_range) if rand_idx2 != rand_idx1: break v_dense = self.v_dense_emb if self.config["data.input.dataset"] != "kwai": a_dense = self.a_dense_emb t_dense = self.t_dense_emb def compute_graph(u_emb, i_emb, idx): all_emb_1 = torch.cat([u_emb, i_emb if rand_idx1 != idx else torch.zeros((self.num_items, self.latent_dim)).to( self.device)]) all_emb_2 = torch.cat([u_emb, i_emb if rand_idx2 != idx else torch.zeros((self.num_items, self.latent_dim)).to( self.device)]) ego_emb_sub_1 = all_emb_1 ego_emb_sub_2 = all_emb_2 embs_sub_1 = [ego_emb_sub_1] embs_sub_2 = [ego_emb_sub_2] g_droped = self.norm_adj for _ in range(self.n_layers): ego_emb_sub_1 = torch.sparse.mm(g_droped, ego_emb_sub_1) ego_emb_sub_2 = torch.sparse.mm(g_droped, ego_emb_sub_2) if dropout: ego_emb_sub_1 = self.dropout(ego_emb_sub_1) ego_emb_sub_2 = self.dropout(ego_emb_sub_2) embs_sub_2.append(ego_emb_sub_1) 
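# Mirroring feature_dropout above: the appends here route each chain's
# propagated embedding into the opposite view's list, so both pooled views
# blend the two masked propagations before they are contrasted below.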
embs_sub_1.append(ego_emb_sub_2) embs_sub_1 = torch.stack(embs_sub_1, dim=1) embs_sub_2 = torch.stack(embs_sub_2, dim=1) light_out_sub_1 = torch.mean(embs_sub_1, dim=1) light_out_sub_2 = torch.mean(embs_sub_2, dim=1) users_sub_1, items_sub_1 = torch.split(light_out_sub_1, [self.num_users, self.num_items]) users_sub_2, items_sub_2 = torch.split(light_out_sub_2, [self.num_users, self.num_items]) return users_sub_1[users_idx], items_sub_1[items_idx], users_sub_2[users_idx], items_sub_2[items_idx] i_emb_u_sub_1, i_emb_i_sub_1, i_emb_u_sub_2, i_emb_i_sub_2 = compute_graph(users_emb, items_emb, idx=3) v_emb_u_sub_1, v_emb_i_sub_1, v_emb_u_sub_2, v_emb_i_sub_2 = compute_graph(users_emb, v_dense, idx=0) if self.config["data.input.dataset"] != "kwai": a_emb_u_sub_1, a_emb_i_sub_1, a_emb_u_sub_2, a_emb_i_sub_2 = compute_graph(users_emb, a_dense, idx=1) t_emb_u_sub_1, t_emb_i_sub_1, t_emb_u_sub_2, t_emb_i_sub_2 = compute_graph(users_emb, t_dense, idx=2) if self.config["data.input.dataset"] == "kwai": users_sub_1 = self.embedding_user_after_GCN(self.mm_fusion([i_emb_u_sub_1, v_emb_u_sub_1])) items_sub_1 = self.embedding_item_after_GCN(self.mm_fusion([i_emb_i_sub_1, v_emb_i_sub_1])) users_sub_2 = self.embedding_user_after_GCN(self.mm_fusion([i_emb_u_sub_2, v_emb_u_sub_2])) items_sub_2 = self.embedding_item_after_GCN(self.mm_fusion([i_emb_i_sub_2, v_emb_i_sub_2])) else: users_sub_1 = self.embedding_user_after_GCN( self.mm_fusion([i_emb_u_sub_1, v_emb_u_sub_1, a_emb_u_sub_1, t_emb_u_sub_1])) items_sub_1 = self.embedding_item_after_GCN( self.mm_fusion([i_emb_i_sub_1, v_emb_i_sub_1, a_emb_i_sub_1, t_emb_i_sub_1])) users_sub_2 = self.embedding_user_after_GCN( self.mm_fusion([i_emb_u_sub_2, v_emb_u_sub_2, a_emb_u_sub_2, t_emb_u_sub_2])) items_sub_2 = self.embedding_item_after_GCN( self.mm_fusion([i_emb_i_sub_2, v_emb_i_sub_2, a_emb_i_sub_2, t_emb_i_sub_2])) users_sub_1 = torch.nn.functional.normalize(users_sub_1, dim=1) users_sub_2 = torch.nn.functional.normalize(users_sub_2, dim=1) items_sub_1 = torch.nn.functional.normalize(items_sub_1, dim=1) items_sub_2 = torch.nn.functional.normalize(items_sub_2, dim=1) logits_user = torch.mm(users_sub_1, users_sub_2.T) logits_user /= self.ssl_temp labels_user = torch.tensor(list(range(users_sub_2.shape[0]))).to(self.device) ssl_loss_user = self.ssl_criterion(logits_user, labels_user) logits_item = torch.mm(items_sub_1, items_sub_2.T) logits_item /= self.ssl_temp labels_item = torch.tensor(list(range(items_sub_2.shape[0]))).to(self.device) ssl_loss_item = self.ssl_criterion(logits_item, labels_item) return ssl_loss_user + ssl_loss_item def fac(self, idx): x_i_iv = self.g_i_iv(self.i_emb_i[idx]) x_v_iv = self.g_v_iv(self.v_emb_i[idx]) v_logits = torch.mm(x_i_iv, x_v_iv.T) v_logits /= self.ssl_temp v_labels = torch.tensor(list(range(x_i_iv.shape[0]))).to(self.device) v_loss = self.infonce_criterion(v_logits, v_labels) if self.config["dataset"] != "kwai": x_iv_iva = self.g_iv_iva(x_i_iv) # x_a_iva = self.g_a_iva(self.a_emb_i[idx]) # a_logits = torch.mm(x_iv_iva, x_a_iva.T) # a_logits /= self.ssl_temp # a_labels = torch.tensor(list(range(x_iv_iva.shape[0]))).to(self.device) # a_loss = self.infonce_criterion(a_logits, a_labels) # x_iva_ivat = self.g_iva_ivat(x_iv_iva) x_t_ivat = self.g_t_ivat(self.t_emb_i[idx]) t_logits = torch.mm(x_iva_ivat, x_t_ivat.T) t_logits /= self.ssl_temp t_labels = torch.tensor(list(range(x_iva_ivat.shape[0]))).to(self.device) t_loss = self.infonce_criterion(t_logits, t_labels) #return v_loss + a_loss + t_loss return v_loss + t_loss else: return 
v_loss def full_sort_predict(self, interaction, candidate_items=None): users = interaction[0] users_emb = self.all_users[users] if candidate_items is None: items_emb = self.all_items else: items_emb = self.all_items[torch.tensor(candidate_items).long().to(self.device)] scores = torch.matmul(users_emb, items_emb.t()) return self.f(scores) def getEmbedding(self, users, pos_items, neg_items): self.all_users, self.all_items = self.compute() users_emb = self.all_users[users] pos_emb = self.all_items[pos_items] users_emb_ego = self.embedding_user(users) pos_emb_ego = self.embedding_item(pos_items) if neg_items is None: neg_emb_ego = neg_emb = None else: neg_emb = self.all_items[neg_items] neg_emb_ego = self.embedding_item(neg_items) return users_emb, pos_emb, neg_emb, users_emb_ego, pos_emb_ego, neg_emb_ego def calculate_loss(self, interaction): # multi-task loss users, pos = interaction[0], interaction[1] main_loss = self.infonce(users, pos) ssl_loss = self.compute_ssl(users, pos) return main_loss + self.config['ssl_alpha'] * ssl_loss def ssl_loss(self, users, pos): # compute ssl loss self.getEmbedding(users.long(), pos.long(), None) return self.compute_ssl(users, pos) def compute_ssl(self, users, items): if self.config["ssl_task"] == "FAC": return self.fac(items) elif self.config["ssl_task"] == "FD": return self.feature_dropout(users.long(), items.long()) elif self.config["ssl_task"] == "FM": return self.feature_masking(users.long(), items.long()) elif self.config["ssl_task"] == "FD+FM": return self.feature_masking(users.long(), items.long(), dropout=True) def forward(self, users, items): all_users, all_items = self.compute() users_emb = all_users[users] items_emb = all_items[items] inner_pro = torch.mul(users_emb, items_emb) gamma = torch.sum(inner_pro, dim=1) return gamma.detach() def mm_fusion(self, reps: list): if self.mm_fusion_mode == "concat": z = torch.cat(reps, dim=1) elif self.mm_fusion_mode == "mean": z = torch.mean(torch.stack(reps), dim=0) return z def infonce(self, users, pos): (users_emb, pos_emb, neg_emb, userEmb0, posEmb0, negEmb0) = self.getEmbedding(users.long(), pos.long(), None) users_emb = torch.nn.functional.normalize(users_emb, dim=1) pos_emb = torch.nn.functional.normalize(pos_emb, dim=1) logits = torch.mm(users_emb, pos_emb.T) logits /= self.temp labels = torch.tensor(list(range(users_emb.shape[0]))).to(self.device) return self.infonce_criterion(logits, labels) def create_u_embeding_i(self): self.embedding_user = torch.nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.latent_dim) self.embedding_item = torch.nn.Embedding(num_embeddings=self.num_items, embedding_dim=self.latent_dim) if self.config["init"] == "xavier": nn.init.xavier_uniform_(self.embedding_user.weight, gain=1) nn.init.xavier_uniform_(self.embedding_item.weight, gain=1) elif self.config["init"] == "normal": nn.init.normal_(self.embedding_user.weight, std=0.1) nn.init.normal_(self.embedding_item.weight, std=0.1) # load features, updated by enoche mul_modal_cnt = 0 if self.v_feat is not None: self.v_feat = torch.nn.functional.normalize(self.v_feat, dim=1) self.v_dense = nn.Linear(self.v_feat.shape[1], self.latent_dim) nn.init.xavier_uniform_(self.v_dense.weight) mul_modal_cnt += 1 if self.t_feat is not None: self.t_feat = torch.nn.functional.normalize(self.t_feat, dim=1) self.t_dense = nn.Linear(self.t_feat.shape[1], self.latent_dim) nn.init.xavier_uniform_(self.t_dense.weight) mul_modal_cnt += 1 # if self.config["dataset"] != "kwai": # if self.a_feat is not None: # self.a_feat = 
torch.nn.functional.normalize(self.a_feat, dim=1) # if self.config["dataset"] == "tiktok": # self.words_tensor = self.dataset.words_tensor.to(self.device) # self.word_embedding = torch.nn.Embedding(11574, 128).to(self.device) # torch.nn.init.xavier_normal_(self.word_embedding.weight) # self.t_feat = scatter(self.word_embedding(self.words_tensor[1]), self.words_tensor[0], reduce='mean', # dim=0).to(self.device) # else: # self.t_feat = torch.nn.functional.normalize(self.dataset.t_feat.to(self.device).float(), dim=1) # visual feature dense # if self.config["data.input.dataset"] != "kwai": # # acoustic feature dense # self.a_dense = nn.Linear(self.a_feat.shape[1], self.latent_dim) # # textual feature dense # self.t_dense = nn.Linear(self.t_feat.shape[1], self.latent_dim) self.item_feat_dim = self.latent_dim * (mul_modal_cnt + 1) # nn.init.xavier_uniform_(self.v_dense.weight) # if self.config["data.input.dataset"] != "kwai": # nn.init.xavier_uniform_(self.a_dense.weight) # nn.init.xavier_uniform_(self.t_dense.weight) self.embedding_item_after_GCN = nn.Linear(self.item_feat_dim, self.latent_dim) self.embedding_user_after_GCN = nn.Linear(self.item_feat_dim, self.latent_dim) nn.init.xavier_uniform_(self.embedding_item_after_GCN.weight) nn.init.xavier_uniform_(self.embedding_user_after_GCN.weight) def create_adj_mat(self, interaction_csr): user_np, item_np = interaction_csr.nonzero() # user_list, item_list = self.dataset.get_train_interactions() # user_np = np.array(user_list, dtype=np.int32) # item_np = np.array(item_list, dtype=np.int32) ratings = np.ones_like(user_np, dtype=np.float32) n_nodes = self.num_users + self.num_items tmp_adj = sp.csr_matrix((ratings, (user_np, item_np + self.num_users)), shape=(n_nodes, n_nodes)) adj_mat = tmp_adj + tmp_adj.T def normalized_adj_single(adj): rowsum = np.array(adj.sum(1)) d_inv = np.power(rowsum, -1).flatten() d_inv[np.isinf(d_inv)] = 0. d_mat_inv = sp.diags(d_inv) norm_adj = d_mat_inv.dot(adj) print('generate single-normalized adjacency matrix.') return norm_adj.tocoo() adj_type = self.config['adj_type'] if adj_type == 'plain': adj_matrix = adj_mat print('use the plain adjacency matrix') elif adj_type == 'norm': adj_matrix = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0])) print('use the normalized adjacency matrix') elif adj_type == 'gcmc': adj_matrix = normalized_adj_single(adj_mat) print('use the gcmc adjacency matrix') elif adj_type == 'pre': # pre adjcency matrix rowsum = np.array(adj_mat.sum(1)) + 1e-08 # avoid RuntimeWarning: divide by zero encountered in power d_inv = np.power(rowsum, -0.5).flatten() d_inv[np.isinf(d_inv)] = 0. 
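# d_inv now holds degree^{-1/2} with divide-by-zero entries zeroed out; the
# two dot products below compute D^{-1/2} A D^{-1/2}, the symmetrically
# normalized adjacency used for LightGCN-style propagation.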
d_mat_inv = sp.diags(d_inv) norm_adj_tmp = d_mat_inv.dot(adj_mat) adj_matrix = norm_adj_tmp.dot(d_mat_inv) print('use the pre adjcency matrix') else: mean_adj = normalized_adj_single(adj_mat) adj_matrix = mean_adj + sp.eye(mean_adj.shape[0]) print('use the mean adjacency matrix') return adj_matrix ================================================ FILE: src/models/smore.py ================================================ # coding: utf-8 # rongqing001@e.ntu.edu.sg r""" SMORE - Multi-modal Recommender System Reference: ACM WSDM 2025: Spectrum-based Modality Representation Fusion Graph Convolutional Network for Multimodal Recommendation Reference Code: https://github.com/kennethorq/SMORE """ import os import numpy as np import scipy.sparse as sp import torch import torch.nn as nn import torch.nn.functional as F import sys import math from common.abstract_recommender import GeneralRecommender from utils.utils import build_sim, compute_normalized_laplacian, build_knn_neighbourhood, build_knn_normalized_graph class SMORE(GeneralRecommender): def __init__(self, config, dataset): super(SMORE, self).__init__(config, dataset) self.sparse = True self.cl_loss = config['cl_loss'] self.n_ui_layers = config['n_ui_layers'] self.embedding_dim = config['embedding_size'] self.n_layers = config['n_layers'] self.reg_weight = config['reg_weight'] self.image_knn_k = config['image_knn_k'] self.text_knn_k = config['text_knn_k'] self.dropout_rate = config['dropout_rate'] self.dropout = nn.Dropout(p=self.dropout_rate) self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32) self.user_embedding = nn.Embedding(self.n_users, self.embedding_dim) self.item_id_embedding = nn.Embedding(self.n_items, self.embedding_dim) nn.init.xavier_uniform_(self.user_embedding.weight) nn.init.xavier_uniform_(self.item_id_embedding.weight) dataset_path = os.path.abspath(config['data_path'] + config['dataset']) image_adj_file = os.path.join(dataset_path, 'image_adj_{}_{}.pt'.format(self.image_knn_k, self.sparse)) text_adj_file = os.path.join(dataset_path, 'text_adj_{}_{}.pt'.format(self.text_knn_k, self.sparse)) self.norm_adj = self.get_adj_mat() self.R_sprse_mat = self.R self.R = self.sparse_mx_to_torch_sparse_tensor(self.R).float().to(self.device) self.norm_adj = self.sparse_mx_to_torch_sparse_tensor(self.norm_adj).float().to(self.device) if self.v_feat is not None: self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=False) if os.path.exists(image_adj_file): image_adj = torch.load(image_adj_file) else: image_adj = build_sim(self.image_embedding.weight.detach()) image_adj = build_knn_normalized_graph(image_adj, topk=self.image_knn_k, is_sparse=self.sparse, norm_type='sym') torch.save(image_adj, image_adj_file) self.image_original_adj = image_adj.cuda() if self.t_feat is not None: self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=False) if os.path.exists(text_adj_file): text_adj = torch.load(text_adj_file) else: text_adj = build_sim(self.text_embedding.weight.detach()) text_adj = build_knn_normalized_graph(text_adj, topk=self.text_knn_k, is_sparse=self.sparse, norm_type='sym') torch.save(text_adj, text_adj_file) self.text_original_adj = text_adj.cuda() self.fusion_adj = self.max_pool_fusion() if self.v_feat is not None: self.image_trs = nn.Linear(self.v_feat.shape[1], self.embedding_dim) if self.t_feat is not None: self.text_trs = nn.Linear(self.t_feat.shape[1], self.embedding_dim) self.softmax = nn.Softmax(dim=-1) self.query_v = nn.Sequential( nn.Linear(self.embedding_dim, 
self.embedding_dim), nn.Tanh(), nn.Linear(self.embedding_dim, self.embedding_dim, bias=False) ) self.query_t = nn.Sequential( nn.Linear(self.embedding_dim, self.embedding_dim), nn.Tanh(), nn.Linear(self.embedding_dim, self.embedding_dim, bias=False) ) self.gate_v = nn.Sequential( nn.Linear(self.embedding_dim, self.embedding_dim), nn.Sigmoid() ) self.gate_t = nn.Sequential( nn.Linear(self.embedding_dim, self.embedding_dim), nn.Sigmoid() ) self.gate_f = nn.Sequential( nn.Linear(self.embedding_dim, self.embedding_dim), nn.Sigmoid() ) self.gate_image_prefer = nn.Sequential( nn.Linear(self.embedding_dim, self.embedding_dim), nn.Sigmoid() ) self.gate_text_prefer = nn.Sequential( nn.Linear(self.embedding_dim, self.embedding_dim), nn.Sigmoid() ) self.gate_fusion_prefer = nn.Sequential( nn.Linear(self.embedding_dim, self.embedding_dim), nn.Sigmoid() ) self.image_complex_weight = nn.Parameter(torch.randn(1, self.embedding_dim // 2 + 1, 2, dtype=torch.float32)) self.text_complex_weight = nn.Parameter(torch.randn(1, self.embedding_dim // 2 + 1, 2, dtype=torch.float32)) self.fusion_complex_weight = nn.Parameter(torch.randn(1, self.embedding_dim // 2 + 1, 2, dtype=torch.float32)) def pre_epoch_processing(self): pass def max_pool_fusion(self): image_adj = self.image_original_adj.coalesce() text_adj = self.text_original_adj.coalesce() image_indices = image_adj.indices().to(self.device) image_values = image_adj.values().to(self.device) text_indices = text_adj.indices().to(self.device) text_values = text_adj.values().to(self.device) combined_indices = torch.cat((image_indices, text_indices), dim=1) combined_indices, unique_idx = torch.unique(combined_indices, dim=1, return_inverse=True) combined_values_image = torch.full((combined_indices.size(1),), float('-inf')).to(self.device) combined_values_text = torch.full((combined_indices.size(1),), float('-inf')).to(self.device) combined_values_image[unique_idx[:image_indices.size(1)]] = image_values combined_values_text[unique_idx[image_indices.size(1):]] = text_values combined_values, _ = torch.max(torch.stack((combined_values_image, combined_values_text)), dim=0) fusion_adj = torch.sparse.FloatTensor(combined_indices, combined_values, image_adj.size()).coalesce() return fusion_adj def get_adj_mat(self): adj_mat = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32) adj_mat = adj_mat.tolil() R = self.interaction_matrix.tolil() adj_mat[:self.n_users, self.n_users:] = R adj_mat[self.n_users:, :self.n_users] = R.T adj_mat = adj_mat.todok() def normalized_adj_single(adj): rowsum = np.array(adj.sum(1)) d_inv = np.power(rowsum, -0.5).flatten() d_inv[np.isinf(d_inv)] = 0. 
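# d_inv holds degree^{-1/2} over the joint (n_users + n_items) graph; the dot
# products below form D^{-1/2} A D^{-1/2}. The helper reads the enclosing
# `adj_mat` rather than its `adj` argument, which is harmless here because
# `adj_mat` is the only matrix it is ever called with. The normalized
# user-to-item block R is sliced out afterwards and reused to project
# item-side modality embeddings onto users.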
d_mat_inv = sp.diags(d_inv) norm_adj = d_mat_inv.dot(adj_mat) norm_adj = norm_adj.dot(d_mat_inv) return norm_adj.tocoo() norm_adj_mat = normalized_adj_single(adj_mat) norm_adj_mat = norm_adj_mat.tolil() self.R = norm_adj_mat[:self.n_users, self.n_users:] return norm_adj_mat.tocsr() def sparse_mx_to_torch_sparse_tensor(self, sparse_mx): """Convert a scipy sparse matrix to a torch sparse tensor.""" sparse_mx = sparse_mx.tocoo().astype(np.float32) indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64)) values = torch.from_numpy(sparse_mx.data) shape = torch.Size(sparse_mx.shape) return torch.sparse.FloatTensor(indices, values, shape) def spectrum_convolution(self, image_embeds, text_embeds): """ Modality Denoising & Cross-Modality Fusion """ image_fft = torch.fft.rfft(image_embeds, dim=1, norm='ortho') text_fft = torch.fft.rfft(text_embeds, dim=1, norm='ortho') image_complex_weight = torch.view_as_complex(self.image_complex_weight) text_complex_weight = torch.view_as_complex(self.text_complex_weight) fusion_complex_weight = torch.view_as_complex(self.fusion_complex_weight) # Uni-modal Denoising image_conv = torch.fft.irfft(image_fft * image_complex_weight, n=image_embeds.shape[1], dim=1, norm='ortho') text_conv = torch.fft.irfft(text_fft * text_complex_weight, n=text_embeds.shape[1], dim=1, norm='ortho') # Cross-modality fusion fusion_conv = torch.fft.irfft(text_fft * image_fft * fusion_complex_weight, n=text_embeds.shape[1], dim=1, norm='ortho') return image_conv, text_conv, fusion_conv def forward(self, adj, train=False): if self.v_feat is not None: image_feats = self.image_trs(self.image_embedding.weight) if self.t_feat is not None: text_feats = self.text_trs(self.text_embedding.weight) # Spectrum Modality Fusion image_conv, text_conv, fusion_conv = self.spectrum_convolution(image_feats, text_feats) image_item_embeds = torch.multiply(self.item_id_embedding.weight, self.gate_v(image_conv)) text_item_embeds = torch.multiply(self.item_id_embedding.weight, self.gate_t(text_conv)) fusion_item_embeds = torch.multiply(self.item_id_embedding.weight, self.gate_f(fusion_conv)) # User-Item (Behavioral) View item_embeds = self.item_id_embedding.weight user_embeds = self.user_embedding.weight ego_embeddings = torch.cat([user_embeds, item_embeds], dim=0) all_embeddings = [ego_embeddings] for i in range(self.n_ui_layers): side_embeddings = torch.sparse.mm(adj, ego_embeddings) ego_embeddings = side_embeddings all_embeddings += [ego_embeddings] all_embeddings = torch.stack(all_embeddings, dim=1) all_embeddings = all_embeddings.mean(dim=1, keepdim=False) content_embeds = all_embeddings # Item-Item Modality Specific and Fusion views # Image-view if self.sparse: for i in range(self.n_layers): image_item_embeds = torch.sparse.mm(self.image_original_adj, image_item_embeds) else: for i in range(self.n_layers): image_item_embeds = torch.mm(self.image_original_adj, image_item_embeds) image_user_embeds = torch.sparse.mm(self.R, image_item_embeds) image_embeds = torch.cat([image_user_embeds, image_item_embeds], dim=0) # Text-view if self.sparse: for i in range(self.n_layers): text_item_embeds = torch.sparse.mm(self.text_original_adj, text_item_embeds) else: for i in range(self.n_layers): text_item_embeds = torch.mm(self.text_original_adj, text_item_embeds) text_user_embeds = torch.sparse.mm(self.R, text_item_embeds) text_embeds = torch.cat([text_user_embeds, text_item_embeds], dim=0) # Fusion-view if self.sparse: for i in range(self.n_layers): fusion_item_embeds = 
torch.sparse.mm(self.fusion_adj, fusion_item_embeds) else: for i in range(self.n_layers): fusion_item_embeds = torch.mm(self.fusion_adj, fusion_item_embeds) fusion_user_embeds = torch.sparse.mm(self.R, fusion_item_embeds) fusion_embeds = torch.cat([fusion_user_embeds, fusion_item_embeds], dim=0) # Modality-aware Preference Module fusion_att_v, fusion_att_t = self.query_v(fusion_embeds), self.query_t(fusion_embeds) fusion_soft_v = self.softmax(fusion_att_v) agg_image_embeds = fusion_soft_v * image_embeds fusion_soft_t = self.softmax(fusion_att_t) agg_text_embeds = fusion_soft_t * text_embeds image_prefer = self.gate_image_prefer(content_embeds) text_prefer = self.gate_text_prefer(content_embeds) fusion_prefer = self.gate_fusion_prefer(content_embeds) image_prefer, text_prefer, fusion_prefer = self.dropout(image_prefer), self.dropout(text_prefer), self.dropout(fusion_prefer) agg_image_embeds = torch.multiply(image_prefer, agg_image_embeds) agg_text_embeds = torch.multiply(text_prefer, agg_text_embeds) fusion_embeds = torch.multiply(fusion_prefer, fusion_embeds) side_embeds = torch.mean(torch.stack([agg_image_embeds, agg_text_embeds, fusion_embeds]), dim=0) all_embeds = content_embeds + side_embeds all_embeddings_users, all_embeddings_items = torch.split(all_embeds, [self.n_users, self.n_items], dim=0) if train: return all_embeddings_users, all_embeddings_items, side_embeds, content_embeds return all_embeddings_users, all_embeddings_items def bpr_loss(self, users, pos_items, neg_items): pos_scores = torch.sum(torch.mul(users, pos_items), dim=1) neg_scores = torch.sum(torch.mul(users, neg_items), dim=1) regularizer = 1. / 2 * (users ** 2).sum() + 1. / 2 * (pos_items ** 2).sum() + 1. / 2 * (neg_items ** 2).sum() regularizer = regularizer / self.batch_size maxi = F.logsigmoid(pos_scores - neg_scores) mf_loss = -torch.mean(maxi) emb_loss = self.reg_weight * regularizer reg_loss = 0.0 return mf_loss, emb_loss, reg_loss def InfoNCE(self, view1, view2, temperature): view1, view2 = F.normalize(view1, dim=1), F.normalize(view2, dim=1) pos_score = (view1 * view2).sum(dim=-1) pos_score = torch.exp(pos_score / temperature) ttl_score = torch.matmul(view1, view2.transpose(0, 1)) ttl_score = torch.exp(ttl_score / temperature).sum(dim=1) cl_loss = -torch.log(pos_score / ttl_score) return torch.mean(cl_loss) def calculate_loss(self, interaction): users = interaction[0] pos_items = interaction[1] neg_items = interaction[2] ua_embeddings, ia_embeddings, side_embeds, content_embeds = self.forward( self.norm_adj, train=True) u_g_embeddings = ua_embeddings[users] pos_i_g_embeddings = ia_embeddings[pos_items] neg_i_g_embeddings = ia_embeddings[neg_items] batch_mf_loss, batch_emb_loss, batch_reg_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings) side_embeds_users, side_embeds_items = torch.split(side_embeds, [self.n_users, self.n_items], dim=0) content_embeds_user, content_embeds_items = torch.split(content_embeds, [self.n_users, self.n_items], dim=0) cl_loss = self.InfoNCE(side_embeds_items[pos_items], content_embeds_items[pos_items], 0.2) + self.InfoNCE( side_embeds_users[users], content_embeds_user[users], 0.2) return batch_mf_loss + batch_emb_loss + batch_reg_loss + self.cl_loss * cl_loss def full_sort_predict(self, interaction): user = interaction[0] restore_user_e, restore_item_e = self.forward(self.norm_adj) u_embeddings = restore_user_e[user] # dot with all item embedding to accelerate scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1)) return scores 
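# --- Illustrative sketch: the spectrum convolution above, in isolation ---
# A minimal, self-contained demo of the core idea in
# SMORE.spectrum_convolution: embeddings are filtered in the frequency domain
# with a learnable complex weight, and the two modality spectra are
# multiplied to fuse them before an inverse rFFT maps the result back.
# The toy sizes below are assumptions, not values from the repo configs.
if __name__ == "__main__":
    import torch

    n_items, embedding_dim = 8, 64
    image_embeds = torch.randn(n_items, embedding_dim)
    text_embeds = torch.randn(n_items, embedding_dim)

    # One complex weight per rFFT bin (dim // 2 + 1 bins), stored as
    # (real, imag) pairs exactly like the model's nn.Parameter tensors.
    fusion_complex_weight = torch.view_as_complex(
        torch.randn(1, embedding_dim // 2 + 1, 2))

    image_fft = torch.fft.rfft(image_embeds, dim=1, norm='ortho')
    text_fft = torch.fft.rfft(text_embeds, dim=1, norm='ortho')

    # Element-wise products in the frequency domain act as circular
    # convolutions over the embedding dimension.
    fusion_conv = torch.fft.irfft(text_fft * image_fft * fusion_complex_weight,
                                  n=embedding_dim, dim=1, norm='ortho')
    print(fusion_conv.shape)  # torch.Size([8, 64])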
================================================ FILE: src/models/vbpr.py ================================================ # coding: utf-8 # @email: enoche.chow@gmail.com r""" VBPR -- Recommended version ################################################ Reference: VBPR: Visual Bayesian Personalized Ranking from Implicit Feedback -Ruining He, Julian McAuley. AAAI'16 """ import numpy as np import os import torch import torch.nn as nn from common.abstract_recommender import GeneralRecommender from common.loss import BPRLoss, EmbLoss from common.init import xavier_normal_initialization import torch.nn.functional as F class VBPR(GeneralRecommender): r"""BPR is a basic matrix factorization model that be trained in the pairwise way. """ def __init__(self, config, dataloader): super(VBPR, self).__init__(config, dataloader) # load parameters info self.u_embedding_size = self.i_embedding_size = config['embedding_size'] self.reg_weight = config['reg_weight'] # float32 type: the weight decay for l2 normalizaton # define layers and loss self.u_embedding = nn.Parameter(nn.init.xavier_uniform_(torch.empty(self.n_users, self.u_embedding_size * 2))) self.i_embedding = nn.Parameter(nn.init.xavier_uniform_(torch.empty(self.n_items, self.i_embedding_size))) if self.v_feat is not None and self.t_feat is not None: self.item_raw_features = torch.cat((self.t_feat, self.v_feat), -1) elif self.v_feat is not None: self.item_raw_features = self.v_feat else: self.item_raw_features = self.t_feat self.item_linear = nn.Linear(self.item_raw_features.shape[1], self.i_embedding_size) self.loss = BPRLoss() self.reg_loss = EmbLoss() # parameters initialization self.apply(xavier_normal_initialization) def get_user_embedding(self, user): r""" Get a batch of user embedding tensor according to input user's id. Args: user (torch.LongTensor): The input tensor that contains user's id, shape: [batch_size, ] Returns: torch.FloatTensor: The embedding tensor of a batch of user, shape: [batch_size, embedding_size] """ return self.u_embedding[user, :] def get_item_embedding(self, item): r""" Get a batch of item embedding tensor according to input item's id. 
Args: item (torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ] Returns: torch.FloatTensor: The embedding tensor of a batch of item, shape: [batch_size, embedding_size] """ return self.item_embedding[item, :] def forward(self, dropout=0.0): item_embeddings = self.item_linear(self.item_raw_features) item_embeddings = torch.cat((self.i_embedding, item_embeddings), -1) user_e = F.dropout(self.u_embedding, dropout) item_e = F.dropout(item_embeddings, dropout) return user_e, item_e def calculate_loss(self, interaction): """ loss on one batch :param interaction: batch data format: tensor(3, batch_size) [0]: user list; [1]: positive items; [2]: negative items :return: """ user = interaction[0] pos_item = interaction[1] neg_item = interaction[2] user_embeddings, item_embeddings = self.forward() user_e = user_embeddings[user, :] pos_e = item_embeddings[pos_item, :] #neg_e = self.get_item_embedding(neg_item) neg_e = item_embeddings[neg_item, :] pos_item_score, neg_item_score = torch.mul(user_e, pos_e).sum(dim=1), torch.mul(user_e, neg_e).sum(dim=1) mf_loss = self.loss(pos_item_score, neg_item_score) reg_loss = self.reg_loss(user_e, pos_e, neg_e) loss = mf_loss + self.reg_weight * reg_loss return loss def full_sort_predict(self, interaction): user = interaction[0] user_embeddings, item_embeddings = self.forward() user_e = user_embeddings[user, :] all_item_e = item_embeddings score = torch.matmul(user_e, all_item_e.transpose(0, 1)) return score ================================================ FILE: src/utils/configurator.py ================================================ # coding: utf-8 # @email: enoche.chow@gmail.com # """ ################################ """ import re import os import yaml import torch from logging import getLogger class Config(object): """ Configurator module that load the defined parameters. Configurator module will first load the default parameters from the fixed properties in RecBole and then load parameters from the external input. External input supports three kind of forms: config file, command line and parameter dictionaries. - config file: It's a file that record the parameters to be modified or added. It should be in ``yaml`` format, e.g. a config file is 'example.yaml', the content is: learning_rate: 0.001 train_batch_size: 2048 - command line: It should be in the format as '---learning_rate=0.001' - parameter dictionaries: It should be a dict, where the key is parameter name and the value is parameter value, e.g. config_dict = {'learning_rate': 0.001} Configuration module allows the above three kind of external input format to be used together, the priority order is as following: command line > parameter dictionaries > config file e.g. If we set learning_rate=0.01 in config file, learning_rate=0.02 in command line, learning_rate=0.03 in parameter dictionaries. Finally the learning_rate is equal to 0.02. """ def __init__(self, model=None, dataset=None, config_dict=None, mg=False): """ Args: model (str/AbstractRecommender): the model name or the model class, default is None, if it is None, config will search the parameter 'model' from the external input as the model name or model class. dataset (str): the dataset name, default is None, if it is None, config will search the parameter 'dataset' from the external input as the dataset name. config_file_list (list of str): the external config file, it allows multiple config files, default is None. config_dict (dict): the external parameter dictionaries, default is None. 
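        A minimal usage sketch (the argument values are illustrative):

            config = Config(model='BPR', dataset='baby', config_dict={'learning_rate': 0.001})
            lr = config['learning_rate']    # unknown keys return None via __getitem__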
""" # load dataset config file yaml if config_dict is None: config_dict = {} config_dict['model'] = model config_dict['dataset'] = dataset # model type self.final_config_dict = self._load_dataset_model_config(config_dict, mg) # config in cmd and main.py are latest self.final_config_dict.update(config_dict) self._set_default_parameters() self._init_device() def _load_dataset_model_config(self, config_dict, mg): file_config_dict = dict() file_list = [] # get dataset and model files cur_dir = os.getcwd() cur_dir = os.path.join(cur_dir, 'configs') file_list.append(os.path.join(cur_dir, "overall.yaml")) file_list.append(os.path.join(cur_dir, "dataset", "{}.yaml".format(config_dict['dataset']))) file_list.append(os.path.join(cur_dir, "model", "{}.yaml".format(config_dict['model']))) if mg: file_list.append(os.path.join(cur_dir, "mg.yaml")) hyper_parameters = [] for file in file_list: if os.path.isfile(file): with open(file, 'r', encoding='utf-8') as f: fdata = yaml.load(f.read(), Loader=self._build_yaml_loader()) if fdata.get('hyper_parameters'): hyper_parameters.extend(fdata['hyper_parameters']) file_config_dict.update(fdata) file_config_dict['hyper_parameters'] = hyper_parameters return file_config_dict def _build_yaml_loader(self): loader = yaml.FullLoader loader.add_implicit_resolver( u'tag:yaml.org,2002:float', re.compile(u'''^(?: [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)? |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) |\\.[0-9_]+(?:[eE][-+][0-9]+)? |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]* |[-+]?\\.(?:inf|Inf|INF) |\\.(?:nan|NaN|NAN))$''', re.X), list(u'-+0123456789.')) return loader def _set_default_parameters(self): smaller_metric = ['rmse', 'mae', 'logloss'] valid_metric = self.final_config_dict['valid_metric'].split('@')[0] self.final_config_dict['valid_metric_bigger'] = False if valid_metric in smaller_metric else True # if seed not in hyper_parameters, then add if "seed" not in self.final_config_dict['hyper_parameters']: self.final_config_dict['hyper_parameters'] += ['seed'] def _init_device(self): use_gpu = self.final_config_dict['use_gpu'] if use_gpu: os.environ["CUDA_VISIBLE_DEVICES"] = str(self.final_config_dict['gpu_id']) self.final_config_dict['device'] = torch.device("cuda" if torch.cuda.is_available() and use_gpu else "cpu") def __setitem__(self, key, value): if not isinstance(key, str): raise TypeError("index must be a str.") self.final_config_dict[key] = value def __getitem__(self, item): if item in self.final_config_dict: return self.final_config_dict[item] else: return None def __contains__(self, key): if not isinstance(key, str): raise TypeError("index must be a str.") return key in self.final_config_dict def __str__(self): args_info = '\n' args_info += '\n'.join(["{}={}".format(arg, value) for arg, value in self.final_config_dict.items()]) args_info += '\n\n' return args_info def __repr__(self): return self.__str__() ================================================ FILE: src/utils/data_utils.py ================================================ import torch import random import torchvision.transforms as transforms from torchvision.transforms.functional import pad as img_pad from torchvision.transforms.functional import resize as img_resize from torch.nn.functional import interpolate as img_tensor_resize from torch.nn.functional import pad as img_tensor_pad from torch.nn.modules.utils import _quadruple import numbers import numpy as np from PIL import Image _pil_interpolation_to_str = { Image.NEAREST: 'PIL.Image.NEAREST', Image.BILINEAR: 'PIL.Image.BILINEAR', 
Image.BICUBIC: 'PIL.Image.BICUBIC', Image.LANCZOS: 'PIL.Image.LANCZOS', Image.HAMMING: 'PIL.Image.HAMMING', Image.BOX: 'PIL.Image.BOX', } def flat_list_of_lists(l): """flatten a list of lists [[1,2], [3,4]] to [1,2,3,4]""" return [item for sublist in l for item in sublist] def mask_batch_text_tokens( inputs, tokenizer, mlm_probability=0.15, is_train=True): """ modified from transformers.data.data_collator Args: inputs: (B, L), 2D torch.Tensor, does not work for 1D. It has already been padded. tokenizer: mlm_probability: float is_train: if True use random masking, else mask tokens at fixed position to remove randomness in evaluation. """ if tokenizer.mask_token is None: raise ValueError( "This tokenizer does not have a mask token which is necessary for masked language modeling. " "Remove the --mlm flag if you want to use this tokenizer." ) labels = inputs.clone() # We sample a few tokens in each sequence for masked-LM training # (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa) probability_matrix = torch.full(labels.shape, mlm_probability) special_tokens_mask = [ tokenizer.get_special_tokens_mask( val, already_has_special_tokens=True) for val in labels.tolist() ] probability_matrix.masked_fill_(torch.tensor( special_tokens_mask, dtype=torch.bool), value=0.0) if tokenizer._pad_token is not None: padding_mask = labels.eq(tokenizer.pad_token_id) probability_matrix.masked_fill_(padding_mask, value=0.0) masked_indices = torch.bernoulli(probability_matrix).bool() labels[~masked_indices] = -100 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = torch.bernoulli( torch.full(labels.shape, 0.8)).bool() & masked_indices inputs[indices_replaced] = tokenizer.convert_tokens_to_ids( tokenizer.mask_token) # 10% of the time, we replace masked input tokens with random word indices_random = torch.bernoulli( torch.full(labels.shape, 0.5) ).bool() & masked_indices & ~indices_replaced random_words = torch.randint( len(tokenizer), labels.shape, dtype=torch.long) # len(tokenizer) == #vocab inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels def image_to_tensor(image: np.ndarray, keepdim: bool = True) -> torch.Tensor: """Converts a numpy image to a PyTorch 4d tensor image. Args: image (numpy.ndarray): image of the form :math:`(H, W, C)`, :math:`(H, W)` or :math:`(B, H, W, C)`. keepdim (bool): If ``False`` unsqueeze the input image to match the shape :math:`(B, H, W, C)`. Default: ``True`` Returns: torch.Tensor: tensor of the form :math:`(B, C, H, W)` if keepdim is ``False``, :math:`(C, H, W)` otherwise. """ if not isinstance(image, (np.ndarray,)): raise TypeError("Input type must be a numpy.ndarray. 
Got {}".format( type(image))) if len(image.shape) > 4 or len(image.shape) < 2: raise ValueError( "Input size must be a two, three or four dimensional array") input_shape = image.shape tensor: torch.Tensor = torch.from_numpy(image) if len(input_shape) == 2: # (H, W) -> (1, H, W) tensor = tensor.unsqueeze(0) elif len(input_shape) == 3: # (H, W, C) -> (C, H, W) tensor = tensor.permute(2, 0, 1) elif len(input_shape) == 4: # (B, H, W, C) -> (B, C, H, W) tensor = tensor.permute(0, 3, 1, 2) keepdim = True # no need to unsqueeze else: raise ValueError( "Cannot process image with shape {}".format(input_shape)) return tensor.unsqueeze(0) if not keepdim else tensor def get_padding(image, max_w, max_h, pad_all=False): # keep the images to upper-left corner if isinstance(image, torch.Tensor): h, w = image.shape[-2:] else: w, h = image.size h_padding, v_padding = max_w - w, max_h - h if pad_all: h_padding /= 2 v_padding /= 2 l_pad = h_padding if h_padding % 1 == 0 else h_padding+0.5 t_pad = v_padding if v_padding % 1 == 0 else v_padding+0.5 r_pad = h_padding if h_padding % 1 == 0 else h_padding-0.5 b_pad = v_padding if v_padding % 1 == 0 else v_padding-0.5 else: l_pad, t_pad = 0, 0 r_pad, b_pad = h_padding, v_padding if isinstance(image, torch.Tensor): padding = (int(l_pad), int(r_pad), int(t_pad), int(b_pad)) else: padding = (int(l_pad), int(t_pad), int(r_pad), int(b_pad)) return padding class ImagePad(object): def __init__(self, max_w, max_h, fill=0, padding_mode='constant'): assert isinstance(fill, (numbers.Number, str, tuple)) assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'] self.max_w = max_w self.max_h = max_h self.fill = fill self.padding_mode = padding_mode def __call__(self, img): """ Args: img (PIL Image): Image to be padded. Returns: PIL Image: Padded image. """ if isinstance(img, torch.Tensor): paddings = _quadruple(get_padding(img, self.max_w, self.max_h)) return img_tensor_pad( img, paddings, self.padding_mode, self.fill) return img_pad( img, get_padding(img, self.max_w, self.max_h), self.fill, self.padding_mode) def __repr__(self): return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\ format(self.fill, self.padding_mode) def get_resize_size(image, max_size): """ Args: image: PIL Image or torch.tensor max_size: Returns: Note the height/width order difference >>> pil_img = Image.open("raw_img_tensor.jpg") >>> pil_img.size (640, 480) # (width, height) >>> np_img = np.array(pil_img) >>> np_img.shape (480, 640, 3) # (height, width, 3) """ # note the order of height and width for different inputs if isinstance(image, torch.Tensor): # width, height = image.shape[-2:] height, width = image.shape[-2:] else: width, height = image.size if height >= width: ratio = width*1./height new_height = max_size new_width = new_height * ratio else: ratio = height*1./width new_width = max_size new_height = new_width * ratio size = (int(new_height), int(new_width)) return size class ImageResize(object): """Resize the input image (torch.tensor) to the given size. Args: max_size (int): Desired output size. If size is a sequence like (h, w), output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if height > width, then image will be rescaled to (size * height / width, size) interpolation (int, optional): Desired interpolation. 
Default is ``PIL.Image.BILINEAR`` """ def __init__(self, max_size, interpolation=Image.BILINEAR): assert isinstance(max_size, int) self.max_size = max_size self.interpolation = interpolation def __call__(self, img): """ Args: img (torch.tensor): Image to be scaled. Returns: torch.tensor: Rescaled image. """ if isinstance(img, torch.Tensor): assert isinstance(self.interpolation, str) return img_tensor_resize( img, size=get_resize_size(img, self.max_size), mode=self.interpolation, align_corners=False) return img_resize( img, get_resize_size(img, self.max_size), self.interpolation) def __repr__(self): interpolate_str = _pil_interpolation_to_str[self.interpolation] return self.__class__.__name__ + '(size={0}, interpolation={1})'.format( self.size, interpolate_str) def get_imagenet_transform(min_size=600, max_size=1000): """parameters from https://github.com/pytorch/examples/blob/master/imagenet/main.py This simply crop the center square from the image """ if min_size != 600: import warnings warnings.warn(f'Warning: min_size is not used in image transform, ' f'setting min_size will have no effect.') return transforms.Compose([ ImageResize(max_size, Image.BILINEAR), # longer side will be resized to 1000 ImagePad(max_size, max_size), # pad to 1000 * 1000 ]) class ImageNorm(object): """Apply Normalization to Image Pixels on GPU """ def __init__(self, mean, std): self.mean = torch.tensor(mean).cuda().view(1, 1, 3, 1, 1) self.std = torch.tensor(std).cuda().view(1, 1, 3, 1, 1) # assert max(std) <= 1 and min(std) >= 0\ # or max(mean) <= 1 and min(mean) >= 0,\ # "Please provide mean or std within range [0, 1]" def __call__(self, img): """ Args: img: float image tensors, (B, N, 3, H, W) Returns: img: normalized float image tensors """ if torch.max(img) > 1 and self.mean.max() <= 1: img.div_(255.) return img.sub_(self.mean).div_(self.std) def chunk_list(examples, chunk_size=2, pad_to_divisible=True): """ Args: examples: iterable, examples grouped by image/video chunk_size: int, number of examples in each chunk. pad_to_divisible: bool, pad the examples to be divisible by chunk_size. >>> test_examples = [3, 4, 5, 6, 7] >>> chunk_list(test_examples, chunk_size=2, pad_to_divisible=True) [[3, 4], [5, 6], [7, 7]] # the lst element has some randomness >>> chunk_list(test_examples, chunk_size=2, pad_to_divisible=False) [[3, 4], [5, 6], [7]] """ n_examples = len(examples) remainder = n_examples % chunk_size if pad_to_divisible and remainder > 0: n_pad = chunk_size - remainder pad = random.choices(examples, k=n_pad) # with replacement examples = examples + pad n_examples = len(examples) remainder = 0 chunked_examples = [] n_chunks = int(n_examples / chunk_size) n_chunks = n_chunks + 1 if remainder > 0 else n_chunks for i in range(n_chunks): chunked_examples.append(examples[i*chunk_size: (i+1)*chunk_size]) return chunked_examples def mk_input_group(key_grouped_examples, max_n_example_per_group=2, is_train=True, example_unique_key=None): """ Re-organize examples into groups. Each input group will have a single image paired with X (X=max_n_example_per_img) examples. Images with total #examples > X will be split into multiple groups. In the case a group has < X examples, we will copy the examples to make the group has X examples. Args: key_grouped_examples: dict, each key is image/video id, each value is a list(example) associated with this image/video max_n_example_per_group: int, pair max #examples with each image/video. Note that each image can have multiple groups. 
is_train: bool, if True, copy the examples to make sure each input group has max_n_example_per_group examples. example_unique_key: str, used to make sure no inputs are discarded by matching the input and output ids specified by `example_unique_key` """ input_groups = []  # each element is (id, list(example)) for k, examples in key_grouped_examples.items(): chunked_examples = chunk_list(examples, chunk_size=max_n_example_per_group, pad_to_divisible=is_train) for c in chunked_examples: # if len(c) == 0: # continue input_groups.append((k, c)) if example_unique_key is not None: print(f"Using example_unique_key {example_unique_key} to check whether input and output ids m") # sanity check: make sure we did not discard any input example by accident. input_question_ids = flat_list_of_lists( [[sub_e[example_unique_key] for sub_e in e] for e in key_grouped_examples.values()]) output_question_ids = flat_list_of_lists( [[sub_e[example_unique_key] for sub_e in e[1]] for e in input_groups]) assert set(input_question_ids) == set(output_question_ids), "You are missing " return input_groups def repeat_tensor_rows(raw_tensor, row_repeats): """ repeat raw_tensor[i] row_repeats[i] times. Args: raw_tensor: (B, *) row_repeats: list(int), len(row_repeats) == len(raw_tensor) """ assert len(raw_tensor) == len(row_repeats), "Has to be the same length" if sum(row_repeats) == len(row_repeats): return raw_tensor else: indices = torch.LongTensor( flat_list_of_lists([[i] * r for i, r in enumerate(row_repeats)]) ).to(raw_tensor.device) return raw_tensor.index_select(0, indices) #### Data utils import io def load_decompress_img_from_lmdb_value(lmdb_value): """ Args: lmdb_value: image binary from with open(filepath, "rb") as f: lmdb_value = f.read() Returns: PIL image, (h, w, c) """ io_stream = io.BytesIO(lmdb_value) img = Image.open(io_stream, mode="r") return img ================================================ FILE: src/utils/dataloader.py ================================================ # coding: utf-8 # @email: enoche.chow@gmail.com """ Wrap dataset into dataloader ################################################ """ import math import torch import random import numpy as np from logging import getLogger from scipy.sparse import coo_matrix class AbstractDataLoader(object): """:class:`AbstractDataLoader` is an abstract object which would return a batch of data which is loaded by :class:`~recbole.data.interaction.Interaction` when it is iterated. And it is also the ancestor of all other dataloaders. Args: config (Config): The config of dataloader. dataset (Dataset): The dataset of dataloader. batch_size (int, optional): The batch_size of dataloader. Defaults to ``1``. dl_format (InputType, optional): The input type of dataloader. Defaults to :obj:`~recbole.utils.enum_type.InputType.POINTWISE`. shuffle (bool, optional): Whether the dataloader will be shuffled after a round. Defaults to ``False``. Attributes: dataset (Dataset): The dataset of this dataloader. shuffle (bool): If ``True``, dataloader will shuffle before every epoch. real_time (bool): If ``True``, dataloader will do data pre-processing, such as neg-sampling and data-augmentation. pr (int): Pointer of dataloader. step (int): The increment of :attr:`pr` for each batch. batch_size (int): The max number of interactions in each batch. 
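        A minimal iteration sketch (``train_loader`` and ``model`` are
        illustrative names; concrete subclasses implement ``pr_end`` and
        ``_next_batch_data``):

            for batch in train_loader:    # reshuffles first when shuffle=True
                loss = model.calculate_loss(batch)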
""" def __init__(self, config, dataset, additional_dataset=None, batch_size=1, neg_sampling=False, shuffle=False): self.config = config self.logger = getLogger() self.dataset = dataset self.dataset_bk = self.dataset.copy(self.dataset.df) # if config['model_type'] == ModelType.GENERAL: # self.dataset.df.drop(self.dataset.ts_id, inplace=True, axis=1) # elif config['model_type'] == ModelType.SEQUENTIAL: # # sort instances # pass self.additional_dataset = additional_dataset self.batch_size = batch_size self.step = batch_size self.shuffle = shuffle self.neg_sampling = neg_sampling self.device = config['device'] self.sparsity = 1 - self.dataset.inter_num / self.dataset.user_num / self.dataset.item_num self.pr = 0 self.inter_pr = 0 def pretrain_setup(self): """This function can be used to deal with some problems after essential args are initialized, such as the batch-size-adaptation when neg-sampling is needed, and so on. By default, it will do nothing. """ pass def data_preprocess(self): """This function is used to do some data preprocess, such as pre-neg-sampling and pre-data-augmentation. By default, it will do nothing. """ pass def __len__(self): return math.ceil(self.pr_end / self.step) def __iter__(self): if self.shuffle: self._shuffle() return self def __next__(self): if self.pr >= self.pr_end: self.pr = 0 self.inter_pr = 0 raise StopIteration() return self._next_batch_data() @property def pr_end(self): """This property marks the end of dataloader.pr which is used in :meth:`__next__()`.""" raise NotImplementedError('Method [pr_end] should be implemented') def _shuffle(self): """Shuffle the order of data, and it will be called by :meth:`__iter__()` if self.shuffle is True. """ raise NotImplementedError('Method [shuffle] should be implemented.') def _next_batch_data(self): """Assemble next batch of data in form of Interaction, and return these data. Returns: Interaction: The next batch of data. """ raise NotImplementedError('Method [next_batch_data] should be implemented.') class TrainDataLoader(AbstractDataLoader): """ General dataloader with negative sampling. """ def __init__(self, config, dataset, batch_size=1, shuffle=False): super().__init__(config, dataset, additional_dataset=None, batch_size=batch_size, neg_sampling=True, shuffle=shuffle) # special for training dataloader self.history_items_per_u = dict() # full items in training. self.all_items = self.dataset.df[self.dataset.iid_field].unique().tolist() self.all_uids = self.dataset.df[self.dataset.uid_field].unique() self.all_items_set = set(self.all_items) self.all_users_set = set(self.all_uids) self.all_item_len = len(self.all_items) # if full sampling self.use_full_sampling = config['use_full_sampling'] if config['use_neg_sampling']: if self.use_full_sampling: self.sample_func = self._get_full_uids_sample else: self.sample_func = self._get_neg_sample else: self.sample_func = self._get_non_neg_sample self._get_history_items_u() self.neighborhood_loss_required = config['use_neighborhood_loss'] if self.neighborhood_loss_required: self.history_users_per_i = {} self._get_history_users_i() self.user_user_dict = self._get_my_neighbors(self.config['USER_ID_FIELD']) self.item_item_dict = self._get_my_neighbors(self.config['ITEM_ID_FIELD']) def pretrain_setup(self): """ Reset dataloader. Outputing the same positive & negative samples with each training. 
:return: """ # sort & random if self.shuffle: self.dataset = self.dataset_bk.copy(self.dataset_bk.df) self.all_items.sort() if self.use_full_sampling: self.all_uids.sort() random.shuffle(self.all_items) # reorder dataset as default (chronological order) #self.dataset.sort_by_chronological() def inter_matrix(self, form='coo', value_field=None): """Get sparse matrix that describe interactions between user_id and item_id. Sparse matrix has shape (user_num, item_num). For a row of , ``matrix[src, tgt] = 1`` if ``value_field`` is ``None``, else ``matrix[src, tgt] = self.inter_feat[src, tgt]``. Args: form (str, optional): Sparse matrix format. Defaults to ``coo``. value_field (str, optional): Data of sparse matrix, which should exist in ``df_feat``. Defaults to ``None``. Returns: scipy.sparse: Sparse matrix in form ``coo`` or ``csr``. """ if not self.dataset.uid_field or not self.dataset.iid_field: raise ValueError('dataset doesn\'t exist uid/iid, thus can not converted to sparse matrix') return self._create_sparse_matrix(self.dataset.df, self.dataset.uid_field, self.dataset.iid_field, form, value_field) def _create_sparse_matrix(self, df_feat, source_field, target_field, form='coo', value_field=None): """Get sparse matrix that describe relations between two fields. Source and target should be token-like fields. Sparse matrix has shape (``self.num(source_field)``, ``self.num(target_field)``). For a row of , ``matrix[src, tgt] = 1`` if ``value_field`` is ``None``, else ``matrix[src, tgt] = df_feat[value_field][src, tgt]``. Args: df_feat (pandas.DataFrame): Feature where src and tgt exist. form (str, optional): Sparse matrix format. Defaults to ``coo``. value_field (str, optional): Data of sparse matrix, which should exist in ``df_feat``. Defaults to ``None``. Returns: scipy.sparse: Sparse matrix in form ``coo`` or ``csr``. 
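        For example (``'userID'`` and ``'itemID'`` are illustrative field
        names), ``self._create_sparse_matrix(df, 'userID', 'itemID', form='csr')``
        returns a (user_num, item_num) CSR matrix with a 1 at every observed
        user-item pair.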
""" src = df_feat[source_field].values tgt = df_feat[target_field].values if value_field is None: data = np.ones(len(df_feat)) else: if value_field not in df_feat.columns: raise ValueError('value_field [{}] should be one of `df_feat`\'s features.'.format(value_field)) data = df_feat[value_field].values mat = coo_matrix((data, (src, tgt)), shape=(self.dataset.user_num, self.dataset.item_num)) if form == 'coo': return mat elif form == 'csr': return mat.tocsr() else: raise NotImplementedError('sparse matrix format [{}] has not been implemented.'.format(form)) @property def pr_end(self): if self.use_full_sampling: return len(self.all_uids) return len(self.dataset) def _shuffle(self): self.dataset.shuffle() if self.use_full_sampling: np.random.shuffle(self.all_uids) def _next_batch_data(self): return self.sample_func() def _get_neg_sample(self): cur_data = self.dataset[self.pr: self.pr + self.step] self.pr += self.step # to tensor user_tensor = torch.tensor(cur_data[self.config['USER_ID_FIELD']].values).type(torch.LongTensor).to(self.device) item_tensor = torch.tensor(cur_data[self.config['ITEM_ID_FIELD']].values).type(torch.LongTensor).to(self.device) batch_tensor = torch.cat((torch.unsqueeze(user_tensor, 0), torch.unsqueeze(item_tensor, 0))) u_ids = cur_data[self.config['USER_ID_FIELD']] # sampling negative items only in the dataset (train) neg_ids = self._sample_neg_ids(u_ids).to(self.device) # for neighborhood loss if self.neighborhood_loss_required: i_ids = cur_data[self.config['ITEM_ID_FIELD']] pos_neighbors, neg_neighbors = self._get_neighborhood_samples(i_ids, self.config['ITEM_ID_FIELD']) pos_neighbors, neg_neighbors = pos_neighbors.to(self.device), neg_neighbors.to(self.device) batch_tensor = torch.cat((batch_tensor, neg_ids.unsqueeze(0), pos_neighbors.unsqueeze(0), neg_neighbors.unsqueeze(0))) # merge negative samples else: batch_tensor = torch.cat((batch_tensor, neg_ids.unsqueeze(0))) return batch_tensor def _get_non_neg_sample(self): cur_data = self.dataset[self.pr: self.pr + self.step] self.pr += self.step # to tensor user_tensor = torch.tensor(cur_data[self.config['USER_ID_FIELD']].values).type(torch.LongTensor).to(self.device) item_tensor = torch.tensor(cur_data[self.config['ITEM_ID_FIELD']].values).type(torch.LongTensor).to(self.device) batch_tensor = torch.cat((torch.unsqueeze(user_tensor, 0), torch.unsqueeze(item_tensor, 0))) return batch_tensor def _get_full_uids_sample(self): user_tensor = torch.tensor(self.all_uids[self.pr: self.pr + self.step]).type(torch.LongTensor).to(self.device) self.pr += self.step return user_tensor def _sample_neg_ids(self, u_ids): neg_ids = [] for u in u_ids: # random 1 item iid = self._random() while iid in self.history_items_per_u[u]: iid = self._random() neg_ids.append(iid) return torch.tensor(neg_ids).type(torch.LongTensor) def _get_my_neighbors(self, id_str): ret_dict = {} a2b_dict = self.history_items_per_u if id_str == self.config['USER_ID_FIELD'] else self.history_users_per_i b2a_dict = self.history_users_per_i if id_str == self.config['USER_ID_FIELD'] else self.history_items_per_u for i, j in a2b_dict.items(): k = set() for m in j: k |= b2a_dict.get(m, set()).copy() k.discard(i) # remove myself ret_dict[i] = k return ret_dict def _get_neighborhood_samples(self, ids, id_str): a2a_dict = self.user_user_dict if id_str == self.config['USER_ID_FIELD'] else self.item_item_dict all_set = self.all_users_set if id_str == self.config['USER_ID_FIELD'] else self.all_items_set pos_ids, neg_ids = [], [] for i in ids: pos_ids_my = a2a_dict[i] if 
    @property
    def pr_end(self):
        if self.use_full_sampling:
            return len(self.all_uids)
        return len(self.dataset)

    def _shuffle(self):
        self.dataset.shuffle()
        if self.use_full_sampling:
            np.random.shuffle(self.all_uids)

    def _next_batch_data(self):
        return self.sample_func()

    def _get_neg_sample(self):
        cur_data = self.dataset[self.pr: self.pr + self.step]
        self.pr += self.step
        # to tensor
        user_tensor = torch.tensor(cur_data[self.config['USER_ID_FIELD']].values).type(torch.LongTensor).to(self.device)
        item_tensor = torch.tensor(cur_data[self.config['ITEM_ID_FIELD']].values).type(torch.LongTensor).to(self.device)
        batch_tensor = torch.cat((torch.unsqueeze(user_tensor, 0), torch.unsqueeze(item_tensor, 0)))
        u_ids = cur_data[self.config['USER_ID_FIELD']]
        # sample negative items only from items seen in the dataset (train)
        neg_ids = self._sample_neg_ids(u_ids).to(self.device)
        # for neighborhood loss
        if self.neighborhood_loss_required:
            i_ids = cur_data[self.config['ITEM_ID_FIELD']]
            pos_neighbors, neg_neighbors = self._get_neighborhood_samples(i_ids, self.config['ITEM_ID_FIELD'])
            pos_neighbors, neg_neighbors = pos_neighbors.to(self.device), neg_neighbors.to(self.device)

            batch_tensor = torch.cat((batch_tensor, neg_ids.unsqueeze(0),
                                      pos_neighbors.unsqueeze(0), neg_neighbors.unsqueeze(0)))
        # merge negative samples
        else:
            batch_tensor = torch.cat((batch_tensor, neg_ids.unsqueeze(0)))
        return batch_tensor

    def _get_non_neg_sample(self):
        cur_data = self.dataset[self.pr: self.pr + self.step]
        self.pr += self.step
        # to tensor
        user_tensor = torch.tensor(cur_data[self.config['USER_ID_FIELD']].values).type(torch.LongTensor).to(self.device)
        item_tensor = torch.tensor(cur_data[self.config['ITEM_ID_FIELD']].values).type(torch.LongTensor).to(self.device)
        batch_tensor = torch.cat((torch.unsqueeze(user_tensor, 0), torch.unsqueeze(item_tensor, 0)))
        return batch_tensor

    def _get_full_uids_sample(self):
        user_tensor = torch.tensor(self.all_uids[self.pr: self.pr + self.step]).type(torch.LongTensor).to(self.device)
        self.pr += self.step
        return user_tensor

    def _sample_neg_ids(self, u_ids):
        neg_ids = []
        for u in u_ids:
            # sample 1 item the user has not interacted with
            iid = self._random()
            while iid in self.history_items_per_u[u]:
                iid = self._random()
            neg_ids.append(iid)
        return torch.tensor(neg_ids).type(torch.LongTensor)

    def _get_my_neighbors(self, id_str):
        ret_dict = {}
        a2b_dict = self.history_items_per_u if id_str == self.config['USER_ID_FIELD'] else self.history_users_per_i
        b2a_dict = self.history_users_per_i if id_str == self.config['USER_ID_FIELD'] else self.history_items_per_u
        for i, j in a2b_dict.items():
            k = set()
            for m in j:
                k |= b2a_dict.get(m, set()).copy()
            k.discard(i)                              # remove myself
            ret_dict[i] = k
        return ret_dict

    def _get_neighborhood_samples(self, ids, id_str):
        a2a_dict = self.user_user_dict if id_str == self.config['USER_ID_FIELD'] else self.item_item_dict
        all_set = self.all_users_set if id_str == self.config['USER_ID_FIELD'] else self.all_items_set
        pos_ids, neg_ids = [], []
        for i in ids:
            pos_ids_my = a2a_dict[i]
            if len(pos_ids_my) <= 0 or len(pos_ids_my) / len(all_set) > 0.8:
                pos_ids.append(0)
                neg_ids.append(0)
                continue
            # random.sample on a set is deprecated since Python 3.9
            # (TypeError in 3.11), so convert to a list first
            pos_id = random.sample(list(pos_ids_my), 1)[0]
            pos_ids.append(pos_id)
            neg_id = random.sample(list(all_set), 1)[0]
            while neg_id in pos_ids_my:
                neg_id = random.sample(list(all_set), 1)[0]
            neg_ids.append(neg_id)
        return torch.tensor(pos_ids).type(torch.LongTensor), torch.tensor(neg_ids).type(torch.LongTensor)

    def _random(self):
        rd_id = random.sample(self.all_items, 1)[0]
        return rd_id

    def _get_history_items_u(self):
        uid_field = self.dataset.uid_field
        iid_field = self.dataset.iid_field
        # load the interacted items for each uid
        uid_freq = self.dataset.df.groupby(uid_field)[iid_field]
        for u, u_ls in uid_freq:
            self.history_items_per_u[u] = set(u_ls.values)
        return self.history_items_per_u

    def _get_history_users_i(self):
        uid_field = self.dataset.uid_field
        iid_field = self.dataset.iid_field
        # load the interacting users for each iid
        iid_freq = self.dataset.df.groupby(iid_field)[uid_field]
        for i, u_ls in iid_freq:
            self.history_users_per_i[i] = set(u_ls.values)
        return self.history_users_per_i
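
# --- Editorial sketch (not part of the original file) ----------------------
# _get_neg_sample above returns one stacked LongTensor whose rows are
# [users, positive items, negative items] (plus two neighborhood rows when the
# neighborhood loss is enabled), so models unpack a batch positionally:

def _demo_batch_layout():
    """Illustrative only; the literal values are toy data."""
    batch_tensor = torch.tensor([[0, 1, 2],    # row 0: user ids
                                 [5, 7, 9],    # row 1: positive item ids
                                 [3, 4, 8]])   # row 2: sampled negative item ids
    users, pos_items, neg_items = batch_tensor[0], batch_tensor[1], batch_tensor[2]
    # a BPR-style loss then scores (users, pos_items) against (users, neg_items)
    return users, pos_items, neg_items
# ----------------------------------------------------------------------------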

class EvalDataLoader(AbstractDataLoader):
    """
    additional_dataset: training dataset in evaluation
    """

    def __init__(self, config, dataset, additional_dataset=None, batch_size=1, shuffle=False):
        super().__init__(config, dataset, additional_dataset=additional_dataset,
                         batch_size=batch_size, neg_sampling=False, shuffle=shuffle)

        if additional_dataset is None:
            raise ValueError('Training dataset is None.')
        self.eval_items_per_u = []
        self.eval_len_list = []
        self.train_pos_len_list = []

        self.eval_u = self.dataset.df[self.dataset.uid_field].unique()
        # special for eval dataloader
        self.pos_items_per_u = self._get_pos_items_per_u(self.eval_u).to(self.device)
        self._get_eval_items_per_u(self.eval_u)
        # to device
        self.eval_u = torch.tensor(self.eval_u).type(torch.LongTensor).to(self.device)

    @property
    def pr_end(self):
        return self.eval_u.shape[0]

    def _shuffle(self):
        self.dataset.shuffle()

    def _next_batch_data(self):
        inter_cnt = sum(self.train_pos_len_list[self.pr: self.pr + self.step])
        batch_users = self.eval_u[self.pr: self.pr + self.step]
        batch_mask_matrix = self.pos_items_per_u[:, self.inter_pr: self.inter_pr + inter_cnt].clone()
        # shift user_ids to batch-local indices
        batch_mask_matrix[0] -= self.pr
        self.inter_pr += inter_cnt
        self.pr += self.step

        return [batch_users, batch_mask_matrix]

    def _get_pos_items_per_u(self, eval_users):
        """
        History items in the training dataset, used for masking out positive items during evaluation.
        :return: a (2, n_train_inters) LongTensor; row 0 holds each user's position in
            ``eval_users`` and row 1 the ids of that user's training items.
        """
        uid_field = self.additional_dataset.uid_field
        iid_field = self.additional_dataset.iid_field
        # load avail items for all uid
        uid_freq = self.additional_dataset.df.groupby(uid_field)[iid_field]
        u_ids = []
        i_ids = []
        for i, u in enumerate(eval_users):
            u_ls = uid_freq.get_group(u).values
            i_len = len(u_ls)
            self.train_pos_len_list.append(i_len)
            u_ids.extend([i] * i_len)
            i_ids.extend(u_ls)
        return torch.tensor([u_ids, i_ids]).type(torch.LongTensor)

    def _get_eval_items_per_u(self, eval_users):
        """
        Get the ground-truth (evaluated) items for each u.
        :return:
        """
        uid_field = self.dataset.uid_field
        iid_field = self.dataset.iid_field
        # load avail items for all uid
        uid_freq = self.dataset.df.groupby(uid_field)[iid_field]
        for u in eval_users:
            u_ls = uid_freq.get_group(u).values
            self.eval_len_list.append(len(u_ls))
            self.eval_items_per_u.append(u_ls)
        self.eval_len_list = np.asarray(self.eval_len_list)

    # return ground-truth items for each u
    def get_eval_items(self):
        return self.eval_items_per_u

    def get_eval_len_list(self):
        return self.eval_len_list

    def get_eval_users(self):
        return self.eval_u.cpu()
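
# --- Editorial sketch (not part of the original file) ----------------------
# A typical consumer of an EvalDataLoader batch: the 2-row mask tensor indexes
# straight into a batch-local score matrix so training positives are pushed to
# -inf before top-k ranking (a common convention; toy values below):

def _demo_eval_masking():
    """Illustrative only."""
    scores = torch.rand(2, 5)                        # (batch users, n_items)
    mask = torch.tensor([[0, 0, 1],                  # row 0: batch-local user index
                         [1, 4, 2]])                 # row 1: item ids seen in training
    scores[mask[0], mask[1]] = float('-inf')         # never recommend known positives
    return torch.topk(scores, k=3, dim=-1).indices   # ranked candidates per user
# ----------------------------------------------------------------------------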

================================================
FILE: src/utils/dataset.py
================================================
# coding: utf-8
# @email: enoche.chow@gmail.com
#
# updated: Mar. 25, 2022
# Filled non-existing raw features with non-zero values after encoding from encoders
"""
Data pre-processing
##########################
"""
from logging import getLogger
from collections import Counter
import os
import pandas as pd
import numpy as np
import torch
from utils.data_utils import (ImageResize, ImagePad, image_to_tensor, load_decompress_img_from_lmdb_value)
import lmdb


class RecDataset(object):
    def __init__(self, config, df=None):
        self.config = config
        self.logger = getLogger()

        # data path & files
        self.dataset_name = config['dataset']
        self.dataset_path = os.path.abspath(config['data_path'] + self.dataset_name)

        # dataframe
        self.uid_field = self.config['USER_ID_FIELD']
        self.iid_field = self.config['ITEM_ID_FIELD']
        self.splitting_label = self.config['inter_splitting_label']

        if df is not None:
            self.df = df
            return
        # check that all required files exist
        check_file_list = [self.config['inter_file_name']]
        for i in check_file_list:
            file_path = os.path.join(self.dataset_path, i)
            if not os.path.isfile(file_path):
                raise ValueError('File {} does not exist.'.format(file_path))

        # load rating file from data path
        self.load_inter_graph(config['inter_file_name'])
        self.item_num = int(max(self.df[self.iid_field].values)) + 1
        self.user_num = int(max(self.df[self.uid_field].values)) + 1

    def load_inter_graph(self, file_name):
        inter_file = os.path.join(self.dataset_path, file_name)
        cols = [self.uid_field, self.iid_field, self.splitting_label]
        self.df = pd.read_csv(inter_file, usecols=cols, sep=self.config['field_separator'])
        if not self.df.columns.isin(cols).all():
            raise ValueError('File {} is missing some required columns.'.format(inter_file))

    def split(self):
        dfs = []
        # splitting into training/validation/test
        for i in range(3):
            temp_df = self.df[self.df[self.splitting_label] == i].copy()
            temp_df.drop(self.splitting_label, inplace=True, axis=1)   # no longer needed
            dfs.append(temp_df)
        if self.config['filter_out_cod_start_users']:
            # filter out cold-start users, i.e. users in val/test that never appear in training
            train_u = set(dfs[0][self.uid_field].values)
            for i in [1, 2]:
                dropped_inter = pd.Series(True, index=dfs[i].index)
                dropped_inter ^= dfs[i][self.uid_field].isin(train_u)
                dfs[i].drop(dfs[i].index[dropped_inter], inplace=True)

        # wrap as RecDataset
        full_ds = [self.copy(_) for _ in dfs]
        return full_ds

    def copy(self, new_df):
        """Given a new interaction feature, return a new :class:`Dataset` object,
        whose interaction feature is updated with ``new_df``, and all the other attributes stay the same.

        Args:
            new_df (pandas.DataFrame): The new interaction feature that needs to be updated.

        Returns:
            :class:`~Dataset`: the new :class:`~Dataset` object, whose interaction feature has been updated.
        """
        nxt = RecDataset(self.config, new_df)

        nxt.item_num = self.item_num
        nxt.user_num = self.user_num
        return nxt

    def get_user_num(self):
        return self.user_num

    def get_item_num(self):
        return self.item_num

    def shuffle(self):
        """Shuffle the interaction records inplace.
        """
        self.df = self.df.sample(frac=1, replace=False).reset_index(drop=True)

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        # Series result
        return self.df.iloc[idx]

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        info = [self.dataset_name]
        self.inter_num = len(self.df)
        uni_u = pd.unique(self.df[self.uid_field])
        uni_i = pd.unique(self.df[self.iid_field])
        tmp_user_num, tmp_item_num = 0, 0
        if self.uid_field:
            tmp_user_num = len(uni_u)
            avg_actions_of_users = self.inter_num / tmp_user_num
            info.extend(['The number of users: {}'.format(tmp_user_num),
                         'Average actions of users: {}'.format(avg_actions_of_users)])
        if self.iid_field:
            tmp_item_num = len(uni_i)
            avg_actions_of_items = self.inter_num / tmp_item_num
            info.extend(['The number of items: {}'.format(tmp_item_num),
                         'Average actions of items: {}'.format(avg_actions_of_items)])
        info.append('The number of inters: {}'.format(self.inter_num))
        if self.uid_field and self.iid_field:
            sparsity = 1 - self.inter_num / tmp_user_num / tmp_item_num
            info.append('The sparsity of the dataset: {}%'.format(sparsity * 100))
        return '\n'.join(info)
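
# --- Editorial sketch (not part of the original file) ----------------------
# How the splitting label partitions interactions, on an in-memory toy frame.
# A plain dict stands in for the Config object; passing `df` skips file
# loading and therefore also skips setting user_num/item_num, so they are set
# by hand here:

def _demo_split():
    """Illustrative only."""
    config = {'dataset': 'toy', 'data_path': './', 'USER_ID_FIELD': 'userID',
              'ITEM_ID_FIELD': 'itemID', 'inter_splitting_label': 'x_label',
              'filter_out_cod_start_users': True}
    df = pd.DataFrame({'userID':  [0, 0, 1, 1, 2],
                       'itemID':  [1, 2, 0, 2, 1],
                       'x_label': [0, 1, 0, 2, 1]})   # 0=train, 1=valid, 2=test
    ds = RecDataset(config, df=df)
    ds.user_num, ds.item_num = 3, 3
    train, valid, test = ds.split()
    return len(train), len(valid), len(test)          # (2, 1, 1): user 2 is cold-start
# ----------------------------------------------------------------------------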
""" LOGROOT = './log/' dir_name = os.path.dirname(LOGROOT) if not os.path.exists(dir_name): os.makedirs(dir_name) logfilename = '{}-{}-{}.log'.format(config['model'], config['dataset'], get_local_time()) logfilepath = os.path.join(LOGROOT, logfilename) filefmt = "%(asctime)-15s %(levelname)s %(message)s" filedatefmt = "%a %d %b %Y %H:%M:%S" fileformatter = logging.Formatter(filefmt, filedatefmt) sfmt = u"%(asctime)-15s %(levelname)s %(message)s" sdatefmt = "%d %b %H:%M" sformatter = logging.Formatter(sfmt, sdatefmt) if config['state'] is None or config['state'].lower() == 'info': level = logging.INFO elif config['state'].lower() == 'debug': level = logging.DEBUG elif config['state'].lower() == 'error': level = logging.ERROR elif config['state'].lower() == 'warning': level = logging.WARNING elif config['state'].lower() == 'critical': level = logging.CRITICAL else: level = logging.INFO # comment following 3 lines and handlers = [sh, fh] to cancel file dump. fh = logging.FileHandler(logfilepath, 'w', 'utf-8') fh.setLevel(level) fh.setFormatter(fileformatter) sh = logging.StreamHandler() sh.setLevel(level) sh.setFormatter(sformatter) logging.basicConfig( level=level, #handlers=[sh] handlers = [sh, fh] ) ================================================ FILE: src/utils/metrics.py ================================================ # encoding: utf-8 # @email: enoche.chow@gmail.com """ ############################ """ from logging import getLogger import numpy as np def recall_(pos_index, pos_len): # Recall: average single users recall ratio. rec_ret = np.cumsum(pos_index, axis=1) / pos_len.reshape(-1, 1) return rec_ret.mean(axis=0) def recall2_(pos_index, pos_len): r""" All hits are summed up and then averaged for recall. :param pos_index: :param pos_len: :return: """ rec_cum = np.cumsum(pos_index, axis=1) rec_ret = rec_cum.sum(axis=0) / pos_len.sum() return rec_ret def ndcg_(pos_index, pos_len): r"""NDCG_ (also known as normalized discounted cumulative gain) is a measure of ranking quality. Through normalizing the score, users and their recommendation list results in the whole test set can be evaluated. .. _NDCG: https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG .. math:: \begin{gather} \mathrm {DCG@K}=\sum_{i=1}^{K} \frac{2^{rel_i}-1}{\log_{2}{(i+1)}}\\ \mathrm {IDCG@K}=\sum_{i=1}^{K}\frac{1}{\log_{2}{(i+1)}}\\ \mathrm {NDCG_u@K}=\frac{DCG_u@K}{IDCG_u@K}\\ \mathrm {NDCG@K}=\frac{\sum \nolimits_{u \in u^{te}NDCG_u@K}}{|u^{te}|} \end{gather} :math:`K` stands for recommending :math:`K` items. And the :math:`rel_i` is the relevance of the item in position :math:`i` in the recommendation list. :math:`2^{rel_i}` equals to 1 if the item hits otherwise 0. :math:`U^{te}` is for all users in the test set. """ len_rank = np.full_like(pos_len, pos_index.shape[1]) idcg_len = np.where(pos_len > len_rank, len_rank, pos_len) iranks = np.zeros_like(pos_index, dtype=np.float) iranks[:, :] = np.arange(1, pos_index.shape[1] + 1) idcg = np.cumsum(1.0 / np.log2(iranks + 1), axis=1) for row, idx in enumerate(idcg_len): idcg[row, idx:] = idcg[row, idx - 1] ranks = np.zeros_like(pos_index, dtype=np.float) ranks[:, :] = np.arange(1, pos_index.shape[1] + 1) dcg = 1.0 / np.log2(ranks + 1) dcg = np.cumsum(np.where(pos_index, dcg, 0), axis=1) result = dcg / idcg return result.mean(axis=0) def map_(pos_index, pos_len): r"""MAP_ (also known as Mean Average Precision) The MAP is meant to calculate Avg. Precision for the relevant items. 

def map_(pos_index, pos_len):
    r"""MAP_ (also known as Mean Average Precision) averages the AP over all users;
    the AP in turn averages precision at the positions of relevant items.

    Note:
        In this case the normalization factor used is :math:`\frac{1}{\min(m, N)}`, which prevents
        your AP score from being unfairly suppressed when the number of recommendations
        couldn't possibly capture all the correct ones.

    .. _map: http://sdsawtelle.github.io/blog/output/mean-average-precision-MAP-for-recommender-systems.html#MAP-for-Recommender-Algorithms

    .. math::
        \begin{align*}
            \mathrm{AP@N} &= \frac{1}{\mathrm{min}(m, N)} \sum_{k=1}^N P(k) \cdot rel(k) \\
            \mathrm{MAP@N} &= \frac{1}{|U|} \sum_{u=1}^{|U|} (\mathrm{AP@N})_u
        \end{align*}
    """
    pre = pos_index.cumsum(axis=1) / np.arange(1, pos_index.shape[1] + 1)
    sum_pre = np.cumsum(pre * pos_index.astype(np.float64), axis=1)
    len_rank = np.full_like(pos_len, pos_index.shape[1])
    actual_len = np.where(pos_len > len_rank, len_rank, pos_len)
    result = np.zeros_like(pos_index, dtype=np.float64)
    for row, lens in enumerate(actual_len):
        ranges = np.arange(1, pos_index.shape[1] + 1)
        ranges[lens:] = ranges[lens - 1]
        result[row] = sum_pre[row] / ranges
    return result.mean(axis=0)


def precision_(pos_index, pos_len):
    r"""Precision_ (also called positive predictive value) is the fraction of
    relevant instances among the retrieved instances.

    .. _precision: https://en.wikipedia.org/wiki/Precision_and_recall#Precision

    .. math::
        \mathrm{Precision@K} = \frac{|Rel_u \cap Rec_u|}{|Rec_u|}

    :math:`Rel_u` is the set of items relevant to user :math:`u`, and :math:`Rec_u` is the
    top-K items recommended to that user. We obtain the result by calculating the average
    :math:`Precision@K` over all users.
    """
    rec_ret = pos_index.cumsum(axis=1) / np.arange(1, pos_index.shape[1] + 1)
    return rec_ret.mean(axis=0)


"""Function name and function mapper.
Useful when we have to serialize evaluation metric names
and call the functions based on deserialized names
"""
metrics_dict = {
    'ndcg': ndcg_,
    'recall': recall_,
    'recall2': recall2_,
    'precision': precision_,
    'map': map_,
}


================================================
FILE: src/utils/misc.py
================================================
# coding: utf-8
# @email: enoche.chow@gmail.com

"""
modified from UNITER
"""
import json
import random
import sys
import torch
import numpy as np


class NoOp(object):
    """ useful for distributed training No-Ops """
    def __getattr__(self, name):
        return self.noop

    def noop(self, *args, **kwargs):
        return


def set_random_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


def zero_none_grad(model):
    for p in model.parameters():
        if p.grad is None and p.requires_grad:
            p.grad = p.data.new(p.size()).zero_()
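
# --- Editorial sketch (not part of the original file) ----------------------
# NoOp: any attribute access resolves to a do-nothing callable, so rank-0-only
# objects (loggers, progress bars) can be replaced wholesale on worker
# processes instead of guarding every call with `if rank == 0` (the `rank`
# argument below is hypothetical):

def _demo_noop(rank):
    """Illustrative only."""
    from logging import getLogger
    logger = getLogger() if rank == 0 else NoOp()
    logger.info('only rank 0 actually logs this')   # resolves to NoOp.noop elsewhere
# ----------------------------------------------------------------------------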

================================================
FILE: src/utils/quick_start.py
================================================
# coding: utf-8
# @email: enoche.chow@gmail.com
"""
Run application
##########################
"""
from logging import getLogger
from itertools import product
from utils.dataset import RecDataset
from utils.dataloader import TrainDataLoader, EvalDataLoader
from utils.logger import init_logger
from utils.configurator import Config
from utils.utils import init_seed, get_model, get_trainer, dict2str
import platform
import os


def quick_start(model, dataset, config_dict, save_model=True, mg=False):
    # merge config dict
    config = Config(model, dataset, config_dict, mg)
    init_logger(config)
    logger = getLogger()
    # print config info
    logger.info('██Server: \t' + platform.node())
    logger.info('██Dir: \t' + os.getcwd() + '\n')
    logger.info(config)

    # load data
    dataset = RecDataset(config)
    # print dataset statistics
    logger.info(str(dataset))

    train_dataset, valid_dataset, test_dataset = dataset.split()
    logger.info('\n====Training====\n' + str(train_dataset))
    logger.info('\n====Validation====\n' + str(valid_dataset))
    logger.info('\n====Testing====\n' + str(test_dataset))

    # wrap into dataloader
    train_data = TrainDataLoader(config, train_dataset, batch_size=config['train_batch_size'], shuffle=True)
    (valid_data, test_data) = (
        EvalDataLoader(config, valid_dataset, additional_dataset=train_dataset, batch_size=config['eval_batch_size']),
        EvalDataLoader(config, test_dataset, additional_dataset=train_dataset, batch_size=config['eval_batch_size']))

    ############ Dataset loaded, run model
    hyper_ret = []
    val_metric = config['valid_metric'].lower()
    best_test_value = 0.0
    idx = best_test_idx = 0

    logger.info('\n\n=================================\n\n')

    # hyper-parameters
    hyper_ls = []
    if "seed" not in config['hyper_parameters']:
        config['hyper_parameters'] = ['seed'] + config['hyper_parameters']
    for i in config['hyper_parameters']:
        hyper_ls.append(config[i] or [None])
    # combinations
    combinators = list(product(*hyper_ls))
    total_loops = len(combinators)

    for hyper_tuple in combinators:
        # random seed reset
        for j, k in zip(config['hyper_parameters'], hyper_tuple):
            config[j] = k
        init_seed(config['seed'])

        logger.info('========={}/{}: Parameters:{}={}======='.format(
            idx + 1, total_loops, config['hyper_parameters'], hyper_tuple))

        # set random state of dataloader
        train_data.pretrain_setup()
        # model loading and initialization
        model = get_model(config['model'])(config, train_data).to(config['device'])
        logger.info(model)

        # trainer loading and initialization
        trainer = get_trainer()(config, model, mg)
        # debug
        # model training
        best_valid_score, best_valid_result, best_test_upon_valid = trainer.fit(
            train_data, valid_data=valid_data, test_data=test_data, saved=save_model)
        #########
        hyper_ret.append((hyper_tuple, best_valid_result, best_test_upon_valid))

        # save the best test result
        if best_test_upon_valid[val_metric] > best_test_value:
            best_test_value = best_test_upon_valid[val_metric]
            best_test_idx = idx
        idx += 1

        logger.info('best valid result: {}'.format(dict2str(best_valid_result)))
        logger.info('test result: {}'.format(dict2str(best_test_upon_valid)))
        logger.info('████Current BEST████:\nParameters: {}={},\n'
                    'Valid: {},\nTest: {}\n\n\n'.format(config['hyper_parameters'],
                                                        hyper_ret[best_test_idx][0],
                                                        dict2str(hyper_ret[best_test_idx][1]),
                                                        dict2str(hyper_ret[best_test_idx][2])))

    # log info
    logger.info('\n============All Over=====================')
    for (p, k, v) in hyper_ret:
        logger.info('Parameters: {}={},\n best valid: {},\n best test: {}'.format(
            config['hyper_parameters'], p, dict2str(k), dict2str(v)))

    logger.info('\n\n█████████████ BEST ████████████████')
    logger.info('\tParameters: {}={},\nValid: {},\nTest: {}\n\n'.format(
        config['hyper_parameters'], hyper_ret[best_test_idx][0],
        dict2str(hyper_ret[best_test_idx][1]), dict2str(hyper_ret[best_test_idx][2])))
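
# --- Editorial sketch (not part of the original file) ----------------------
# A minimal driver in the style of src/main.py; the argument names are
# assumptions based on quick_start's signature, not a quote of the real entry
# script:
#
#   import argparse
#   from utils.quick_start import quick_start
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--model', '-m', type=str, default='BM3')
#   parser.add_argument('--dataset', '-d', type=str, default='baby')
#   args = parser.parse_args()
#   # config_dict overrides keys from overall.yaml and the model/dataset yamls
#   quick_start(model=args.model, dataset=args.dataset,
#               config_dict={'gpu_id': 0}, save_model=True)
# ----------------------------------------------------------------------------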

================================================
FILE: src/utils/topk_evaluator.py
================================================
# coding: utf-8
# @email: enoche.chow@gmail.com
"""
################################
"""
import os
import numpy as np
import pandas as pd
import torch
from utils.metrics import metrics_dict
from torch.nn.utils.rnn import pad_sequence
from utils.utils import get_local_time

# These metrics are typical in topk recommendations
topk_metrics = {metric.lower(): metric for metric in ['Recall', 'Recall2', 'Precision', 'NDCG', 'MAP']}


class TopKEvaluator(object):
    r"""TopK Evaluator is mainly used in ranking tasks. Currently, it supports five topk
    metrics: `'Recall', 'Recall2', 'Precision', 'NDCG', 'MAP'`.

    Note:
        The metrics are group-based: scores are computed per user and then averaged
        across users, and each of them is cut off at k.
    """

    def __init__(self, config):
        self.config = config
        self.metrics = config['metrics']
        self.topk = config['topk']
        self.save_recom_result = config['save_recommended_topk']
        self._check_args()

    def collect(self, interaction, scores_tensor, full=False):
        """collect the topk intermediate result of one batch; this function mainly
        implements padding and topk finding. It is called at the end of each batch.

        Args:
            interaction (Interaction): :class:`AbstractEvaluator` of the batch
            scores_tensor (tensor): the tensor of model output with size of `(N, )`
            full (bool, optional): whether it is full sort. Default: False.
        """
        user_len_list = interaction.user_len_list
        if full is True:
            scores_matrix = scores_tensor.view(len(user_len_list), -1)
        else:
            scores_list = torch.split(scores_tensor, user_len_list, dim=0)
            scores_matrix = pad_sequence(scores_list, batch_first=True, padding_value=-np.inf)  # n_users x n_items
        # get topk
        _, topk_index = torch.topk(scores_matrix, max(self.topk), dim=-1)  # n_users x k
        return topk_index
    def evaluate(self, batch_matrix_list, eval_data, is_test=False, idx=0):
        """calculate the metrics of all batches. It is called at the end of each epoch.

        Args:
            batch_matrix_list (list): the results of all batches
            eval_data (Dataset): the class of test data
            is_test (bool): whether this is the test phase

        Returns:
            dict: such as ``{'Hit@20': 0.3824, 'Recall@20': 0.0527, 'Hit@10': 0.3153, 'Recall@10': 0.0329}``
        """
        pos_items = eval_data.get_eval_items()
        pos_len_list = eval_data.get_eval_len_list()
        topk_index = torch.cat(batch_matrix_list, dim=0).cpu().numpy()
        # save the recommendation results?
        if self.save_recom_result and is_test:
            dataset_name = self.config['dataset']
            model_name = self.config['model']
            max_k = max(self.topk)
            dir_name = os.path.abspath(self.config['recommend_topk'])
            if not os.path.exists(dir_name):
                os.makedirs(dir_name)
            file_path = os.path.join(dir_name, '{}-{}-idx{}-top{}-{}.csv'.format(
                model_name, dataset_name, idx, max_k, get_local_time()))
            x_df = pd.DataFrame(topk_index)
            x_df.insert(0, 'id', eval_data.get_eval_users())
            x_df.columns = ['id'] + ['top_' + str(i) for i in range(max_k)]
            x_df = x_df.astype(int)
            x_df.to_csv(file_path, sep='\t', index=False)
        assert len(pos_len_list) == len(topk_index)
        # was each recommended item a hit?
        bool_rec_matrix = []
        for m, n in zip(pos_items, topk_index):
            bool_rec_matrix.append([True if i in m else False for i in n])
        bool_rec_matrix = np.asarray(bool_rec_matrix)

        # get metrics
        metric_dict = {}
        result_list = self._calculate_metrics(pos_len_list, bool_rec_matrix)
        for metric, value in zip(self.metrics, result_list):
            for k in self.topk:
                key = '{}@{}'.format(metric, k)
                metric_dict[key] = round(value[k - 1], 4)
        return metric_dict

    def _check_args(self):
        # Check metrics
        if isinstance(self.metrics, (str, list)):
            if isinstance(self.metrics, str):
                self.metrics = [self.metrics]
        else:
            raise TypeError('metrics must be str or list')

        # Convert metric to lowercase
        for m in self.metrics:
            if m.lower() not in topk_metrics:
                raise ValueError("There is no user grouped topk metric named {}!".format(m))
        self.metrics = [metric.lower() for metric in self.metrics]

        # Check topk:
        if isinstance(self.topk, (int, list)):
            if isinstance(self.topk, int):
                self.topk = [self.topk]
            for topk in self.topk:
                if topk <= 0:
                    raise ValueError(
                        'topk must be a positive integer or a list of positive integers, but get `{}`'.format(topk))
        else:
            raise TypeError('topk must be an integer or a list of integers')

    def _calculate_metrics(self, pos_len_list, topk_index):
        """integrate the results of each batch and evaluate the topk metrics by users

        Args:
            pos_len_list (list): a list of users' positive-item counts
            topk_index (np.ndarray): a matrix which contains the indices of the topk items for users

        Returns:
            np.ndarray: a matrix which contains the metrics result
        """
        result_list = []
        for metric in self.metrics:
            metric_fuc = metrics_dict[metric.lower()]
            result = metric_fuc(topk_index, pos_len_list)
            result_list.append(result)
        return np.stack(result_list, axis=0)

    def __str__(self):
        mesg = 'The TopK Evaluator Info:\n' + '\tMetrics:[' + ', '.join(
            [topk_metrics[metric.lower()] for metric in self.metrics]) \
               + '], TopK:[' + ', '.join(map(str, self.topk)) + ']'
        return mesg
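
# --- Editorial sketch (not part of the original file) ----------------------
# The -inf padding in collect() is what lets users with different candidate
# counts share a single torch.topk call: the padding can never enter the
# top-k.  A hand-checked toy illustration:

def _demo_padded_topk():
    """Illustrative only."""
    scores_tensor = torch.tensor([0.9, 0.1, 0.4, 0.8, 0.7])   # flat scores, 2 users
    user_len_list = [3, 2]                                    # candidates per user
    scores_list = torch.split(scores_tensor, user_len_list, dim=0)
    scores_matrix = pad_sequence(scores_list, batch_first=True, padding_value=-np.inf)
    return torch.topk(scores_matrix, 2, dim=-1).indices
    # -> tensor([[0, 2],
    #            [0, 1]])
# ----------------------------------------------------------------------------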

================================================
FILE: src/utils/utils.py
================================================
# coding: utf-8
# @email : enoche.chow@gmail.com

"""
Utility functions
##########################
"""

import numpy as np
import torch
import importlib
import datetime
import random


def get_local_time():
    r"""Get current time

    Returns:
        str: current time
    """
    cur = datetime.datetime.now()
    cur = cur.strftime('%b-%d-%Y-%H-%M-%S')
    return cur


def get_model(model_name):
    r"""Automatically select model class based on model name

    Args:
        model_name (str): model name

    Returns:
        Recommender: model class
    """
    model_file_name = model_name.lower()
    module_path = '.'.join(['models', model_file_name])
    if importlib.util.find_spec(module_path, __name__):
        model_module = importlib.import_module(module_path, __name__)
        model_class = getattr(model_module, model_name)
        return model_class
    # fail loudly instead of implicitly returning None
    raise ValueError('`model_name` [{}] is not found in `models`.'.format(model_name))


def get_trainer():
    return getattr(importlib.import_module('common.trainer'), 'Trainer')


def init_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    torch.manual_seed(seed)


def early_stopping(value, best, cur_step, max_step, bigger=True):
    r""" validation-based early stopping

    Args:
        value (float): current result
        best (float): best result
        cur_step (int): the number of consecutive steps that did not exceed the best result
        max_step (int): threshold steps for stopping
        bigger (bool, optional): whether bigger is better

    Returns:
        tuple:
        - float, best result after this step
        - int, the number of consecutive steps that did not exceed the best result after this step
        - bool, whether to stop
        - bool, whether to update
    """
    stop_flag = False
    update_flag = False
    if bigger:
        if value > best:
            cur_step = 0
            best = value
            update_flag = True
        else:
            cur_step += 1
            if cur_step > max_step:
                stop_flag = True
    else:
        if value < best:
            cur_step = 0
            best = value
            update_flag = True
        else:
            cur_step += 1
            if cur_step > max_step:
                stop_flag = True
    return best, cur_step, stop_flag, update_flag


def dict2str(result_dict):
    r""" convert result dict to str

    Args:
        result_dict (dict): result dict

    Returns:
        str: result str
    """
    result_str = ''
    for metric, value in result_dict.items():
        result_str += str(metric) + ': ' + '%.04f' % value + ' '
    return result_str


############ LATTICE Utilities #########

def build_knn_neighbourhood(adj, topk):
    knn_val, knn_ind = torch.topk(adj, topk, dim=-1)
    weighted_adjacency_matrix = (torch.zeros_like(adj)).scatter_(-1, knn_ind, knn_val)
    return weighted_adjacency_matrix


def compute_normalized_laplacian(adj):
    rowsum = torch.sum(adj, -1)
    d_inv_sqrt = torch.pow(rowsum, -0.5)
    d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.
    d_mat_inv_sqrt = torch.diagflat(d_inv_sqrt)
    L_norm = torch.mm(torch.mm(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)
    return L_norm


def build_sim(context):
    context_norm = context.div(torch.norm(context, p=2, dim=-1, keepdim=True))
    sim = torch.mm(context_norm, context_norm.transpose(1, 0))
    return sim


def get_sparse_laplacian(edge_index, edge_weight, num_nodes, normalization='none'):
    from torch_scatter import scatter_add
    row, col = edge_index[0], edge_index[1]
    deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)

    if normalization == 'sym':
        deg_inv_sqrt = deg.pow_(-0.5)
        deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)
        edge_weight = deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
    elif normalization == 'rw':
        deg_inv = 1.0 / deg
        deg_inv.masked_fill_(deg_inv == float('inf'), 0)
        edge_weight = deg_inv[row] * edge_weight
    return edge_index, edge_weight


def get_dense_laplacian(adj, normalization='none'):
    if normalization == 'sym':
        rowsum = torch.sum(adj, -1)
        d_inv_sqrt = torch.pow(rowsum, -0.5)
        d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.
        d_mat_inv_sqrt = torch.diagflat(d_inv_sqrt)
        L_norm = torch.mm(torch.mm(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)
    elif normalization == 'rw':
        rowsum = torch.sum(adj, -1)
        d_inv = torch.pow(rowsum, -1)
        d_inv[torch.isinf(d_inv)] = 0.
        d_mat_inv = torch.diagflat(d_inv)
        L_norm = torch.mm(d_mat_inv, adj)
    elif normalization == 'none':
        L_norm = adj
    return L_norm


def build_knn_normalized_graph(adj, topk, is_sparse, norm_type):
    device = adj.device
    knn_val, knn_ind = torch.topk(adj, topk, dim=-1)
    if is_sparse:
        tuple_list = [[row, int(col)] for row in range(len(knn_ind)) for col in knn_ind[row]]
        row = [i[0] for i in tuple_list]
        col = [i[1] for i in tuple_list]
        i = torch.LongTensor([row, col]).to(device)
        v = knn_val.flatten()
        edge_index, edge_weight = get_sparse_laplacian(i, v, normalization=norm_type, num_nodes=adj.shape[0])
        return torch.sparse_coo_tensor(edge_index, edge_weight, adj.shape)
    else:
        weighted_adjacency_matrix = (torch.zeros_like(adj)).scatter_(-1, knn_ind, knn_val)
        return get_dense_laplacian(weighted_adjacency_matrix, normalization=norm_type)
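
# --- Editorial sketch (not part of the original file) ----------------------
# End-to-end use of the LATTICE utilities above: build a cosine-similarity
# graph from item features, keep each row's top-k edges, and symmetrically
# normalize it.  This mirrors how models such as LATTICE/FREEDOM build their
# item-item graphs; the dimensions below are toy values:

def _demo_knn_graph():
    """Illustrative only."""
    feats = torch.rand(6, 16)                     # 6 items, 16-dim features
    sim = build_sim(feats)                        # (6, 6) cosine similarity
    adj = build_knn_neighbourhood(sim, topk=3)    # keep the 3 strongest edges per row
    return compute_normalized_laplacian(adj)      # D^{-1/2} A D^{-1/2}
# ----------------------------------------------------------------------------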