[
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n/data/baby/\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\npip-wheel-metadata/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n.python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n"
  },
  {
    "path": ".idea/.gitignore",
    "content": "# Default ignored files\n/shelf/\n/workspace.xml\n# Editor-based HTTP Client requests\n/httpRequests/\n# Datasource local storage ignored files\n/dataSources/\n/dataSources.local.xml\n"
  },
  {
    "path": ".idea/MMRec.iml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<module type=\"PYTHON_MODULE\" version=\"4\">\n  <component name=\"NewModuleRootManager\">\n    <content url=\"file://$MODULE_DIR$\">\n      <sourceFolder url=\"file://$MODULE_DIR$/src\" isTestSource=\"false\" />\n    </content>\n    <orderEntry type=\"jdk\" jdkName=\"Python 3.7 (env-test) (2)\" jdkType=\"Python SDK\" />\n    <orderEntry type=\"sourceFolder\" forTests=\"false\" />\n  </component>\n  <component name=\"PyDocumentationSettings\">\n    <option name=\"format\" value=\"PLAIN\" />\n    <option name=\"myDocStringFormat\" value=\"Plain\" />\n  </component>\n</module>"
  },
  {
    "path": ".idea/deployment.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"PublishConfigData\" remoteFilesAllowedToDisappearOnAutoupload=\"false\">\n    <serverData>\n      <paths name=\"ecs-user@8.220.208.249:22\">\n        <serverdata>\n          <mappings>\n            <mapping local=\"$PROJECT_DIR$\" web=\"/\" />\n          </mappings>\n        </serverdata>\n      </paths>\n    </serverData>\n  </component>\n</project>"
  },
  {
    "path": ".idea/inspectionProfiles/Project_Default.xml",
    "content": "<component name=\"InspectionProjectProfileManager\">\n  <profile version=\"1.0\">\n    <option name=\"myName\" value=\"Project Default\" />\n    <inspection_tool class=\"DuplicatedCode\" enabled=\"false\" level=\"WEAK WARNING\" enabled_by_default=\"false\" />\n    <inspection_tool class=\"PyPackageRequirementsInspection\" enabled=\"true\" level=\"WARNING\" enabled_by_default=\"true\">\n      <option name=\"ignoredPackages\">\n        <value>\n          <list size=\"160\">\n            <item index=\"0\" class=\"java.lang.String\" itemvalue=\"numpy\" />\n            <item index=\"1\" class=\"java.lang.String\" itemvalue=\"scikit-learn\" />\n            <item index=\"2\" class=\"java.lang.String\" itemvalue=\"tensorflow\" />\n            <item index=\"3\" class=\"java.lang.String\" itemvalue=\"tables\" />\n            <item index=\"4\" class=\"java.lang.String\" itemvalue=\"statsmodels\" />\n            <item index=\"5\" class=\"java.lang.String\" itemvalue=\"wrapt\" />\n            <item index=\"6\" class=\"java.lang.String\" itemvalue=\"pandas\" />\n            <item index=\"7\" class=\"java.lang.String\" itemvalue=\"tqdm\" />\n            <item index=\"8\" class=\"java.lang.String\" itemvalue=\"scipy\" />\n            <item index=\"9\" class=\"java.lang.String\" itemvalue=\"torch\" />\n            <item index=\"10\" class=\"java.lang.String\" itemvalue=\"gensim\" />\n            <item index=\"11\" class=\"java.lang.String\" itemvalue=\"numba\" />\n            <item index=\"12\" class=\"java.lang.String\" itemvalue=\"pyDeprecate\" />\n            <item index=\"13\" class=\"java.lang.String\" itemvalue=\"torchmetrics\" />\n            <item index=\"14\" class=\"java.lang.String\" itemvalue=\"bs4\" />\n            <item index=\"15\" class=\"java.lang.String\" itemvalue=\"flair\" />\n            <item index=\"16\" class=\"java.lang.String\" itemvalue=\"srsly\" />\n            <item index=\"17\" class=\"java.lang.String\" itemvalue=\"conllu\" />\n        
    <item index=\"18\" class=\"java.lang.String\" itemvalue=\"mpld3\" />\n            <item index=\"19\" class=\"java.lang.String\" itemvalue=\"torchvision\" />\n            <item index=\"20\" class=\"java.lang.String\" itemvalue=\"ftfy\" />\n            <item index=\"21\" class=\"java.lang.String\" itemvalue=\"elasticsearch\" />\n            <item index=\"22\" class=\"java.lang.String\" itemvalue=\"Pygments\" />\n            <item index=\"23\" class=\"java.lang.String\" itemvalue=\"bleach\" />\n            <item index=\"24\" class=\"java.lang.String\" itemvalue=\"lxml\" />\n            <item index=\"25\" class=\"java.lang.String\" itemvalue=\"multiprocess\" />\n            <item index=\"26\" class=\"java.lang.String\" itemvalue=\"soupsieve\" />\n            <item index=\"27\" class=\"java.lang.String\" itemvalue=\"torchaudio\" />\n            <item index=\"28\" class=\"java.lang.String\" itemvalue=\"jsonschema\" />\n            <item index=\"29\" class=\"java.lang.String\" itemvalue=\"qtconsole\" />\n            <item index=\"30\" class=\"java.lang.String\" itemvalue=\"Janome\" />\n            <item index=\"31\" class=\"java.lang.String\" itemvalue=\"terminado\" />\n            <item index=\"32\" class=\"java.lang.String\" itemvalue=\"pydantic\" />\n            <item index=\"33\" class=\"java.lang.String\" itemvalue=\"transformers\" />\n            <item index=\"34\" class=\"java.lang.String\" itemvalue=\"Werkzeug\" />\n            <item index=\"35\" class=\"java.lang.String\" itemvalue=\"faiss\" />\n            <item index=\"36\" class=\"java.lang.String\" itemvalue=\"segtok\" />\n            <item index=\"37\" class=\"java.lang.String\" itemvalue=\"jupyter-client\" />\n            <item index=\"38\" class=\"java.lang.String\" itemvalue=\"jupyterlab-pygments\" />\n            <item index=\"39\" class=\"java.lang.String\" itemvalue=\"click\" />\n            <item index=\"40\" class=\"java.lang.String\" itemvalue=\"ipykernel\" />\n            <item index=\"41\" 
class=\"java.lang.String\" itemvalue=\"nbconvert\" />\n            <item index=\"42\" class=\"java.lang.String\" itemvalue=\"psutil\" />\n            <item index=\"43\" class=\"java.lang.String\" itemvalue=\"regex\" />\n            <item index=\"44\" class=\"java.lang.String\" itemvalue=\"tensorboard\" />\n            <item index=\"45\" class=\"java.lang.String\" itemvalue=\"cymem\" />\n            <item index=\"46\" class=\"java.lang.String\" itemvalue=\"platformdirs\" />\n            <item index=\"47\" class=\"java.lang.String\" itemvalue=\"bpemb\" />\n            <item index=\"48\" class=\"java.lang.String\" itemvalue=\"matplotlib\" />\n            <item index=\"49\" class=\"java.lang.String\" itemvalue=\"konoha\" />\n            <item index=\"50\" class=\"java.lang.String\" itemvalue=\"rank-bm25\" />\n            <item index=\"51\" class=\"java.lang.String\" itemvalue=\"murmurhash\" />\n            <item index=\"52\" class=\"java.lang.String\" itemvalue=\"lightgbm\" />\n            <item index=\"53\" class=\"java.lang.String\" itemvalue=\"jsonlines\" />\n            <item index=\"54\" class=\"java.lang.String\" itemvalue=\"pytrec-eval\" />\n            <item index=\"55\" class=\"java.lang.String\" itemvalue=\"wasabi\" />\n            <item index=\"56\" class=\"java.lang.String\" itemvalue=\"networkx\" />\n            <item index=\"57\" class=\"java.lang.String\" itemvalue=\"cffi\" />\n            <item index=\"58\" class=\"java.lang.String\" itemvalue=\"wget\" />\n            <item index=\"59\" class=\"java.lang.String\" itemvalue=\"antlr4-python3-runtime\" />\n            <item index=\"60\" class=\"java.lang.String\" itemvalue=\"datasets\" />\n            <item index=\"61\" class=\"java.lang.String\" itemvalue=\"py4j\" />\n            <item index=\"62\" class=\"java.lang.String\" itemvalue=\"requests\" />\n            <item index=\"63\" class=\"java.lang.String\" itemvalue=\"pyrsistent\" />\n            <item index=\"64\" class=\"java.lang.String\" 
itemvalue=\"pylcs\" />\n            <item index=\"65\" class=\"java.lang.String\" itemvalue=\"gdown\" />\n            <item index=\"66\" class=\"java.lang.String\" itemvalue=\"Deprecated\" />\n            <item index=\"67\" class=\"java.lang.String\" itemvalue=\"stack-data\" />\n            <item index=\"68\" class=\"java.lang.String\" itemvalue=\"smart-open\" />\n            <item index=\"69\" class=\"java.lang.String\" itemvalue=\"prompt-toolkit\" />\n            <item index=\"70\" class=\"java.lang.String\" itemvalue=\"ipywidgets\" />\n            <item index=\"71\" class=\"java.lang.String\" itemvalue=\"pyarrow\" />\n            <item index=\"72\" class=\"java.lang.String\" itemvalue=\"tornado\" />\n            <item index=\"73\" class=\"java.lang.String\" itemvalue=\"dpr\" />\n            <item index=\"74\" class=\"java.lang.String\" itemvalue=\"black\" />\n            <item index=\"75\" class=\"java.lang.String\" itemvalue=\"SoundFile\" />\n            <item index=\"76\" class=\"java.lang.String\" itemvalue=\"overrides\" />\n            <item index=\"77\" class=\"java.lang.String\" itemvalue=\"langcodes\" />\n            <item index=\"78\" class=\"java.lang.String\" itemvalue=\"importlib-resources\" />\n            <item index=\"79\" class=\"java.lang.String\" itemvalue=\"hydra-core\" />\n            <item index=\"80\" class=\"java.lang.String\" itemvalue=\"jupyter-console\" />\n            <item index=\"81\" class=\"java.lang.String\" itemvalue=\"typing_extensions\" />\n            <item index=\"82\" class=\"java.lang.String\" itemvalue=\"cachetools\" />\n            <item index=\"83\" class=\"java.lang.String\" itemvalue=\"debugpy\" />\n            <item index=\"84\" class=\"java.lang.String\" itemvalue=\"multidict\" />\n            <item index=\"85\" class=\"java.lang.String\" itemvalue=\"responses\" />\n            <item index=\"86\" class=\"java.lang.String\" itemvalue=\"thinc\" />\n            <item index=\"87\" class=\"java.lang.String\" 
itemvalue=\"yarl\" />\n            <item index=\"88\" class=\"java.lang.String\" itemvalue=\"pytz\" />\n            <item index=\"89\" class=\"java.lang.String\" itemvalue=\"Pillow\" />\n            <item index=\"90\" class=\"java.lang.String\" itemvalue=\"traitlets\" />\n            <item index=\"91\" class=\"java.lang.String\" itemvalue=\"protobuf\" />\n            <item index=\"92\" class=\"java.lang.String\" itemvalue=\"beir\" />\n            <item index=\"93\" class=\"java.lang.String\" itemvalue=\"threadpoolctl\" />\n            <item index=\"94\" class=\"java.lang.String\" itemvalue=\"huggingface-hub\" />\n            <item index=\"95\" class=\"java.lang.String\" itemvalue=\"nbclient\" />\n            <item index=\"96\" class=\"java.lang.String\" itemvalue=\"QtPy\" />\n            <item index=\"97\" class=\"java.lang.String\" itemvalue=\"tinycss2\" />\n            <item index=\"98\" class=\"java.lang.String\" itemvalue=\"frozenlist\" />\n            <item index=\"99\" class=\"java.lang.String\" itemvalue=\"submitit\" />\n            <item index=\"100\" class=\"java.lang.String\" itemvalue=\"fsspec\" />\n            <item index=\"101\" class=\"java.lang.String\" itemvalue=\"spacy\" />\n            <item index=\"102\" class=\"java.lang.String\" itemvalue=\"sqlitedict\" />\n            <item index=\"103\" class=\"java.lang.String\" itemvalue=\"filelock\" />\n            <item index=\"104\" class=\"java.lang.String\" itemvalue=\"jupyterlab-widgets\" />\n            <item index=\"105\" class=\"java.lang.String\" itemvalue=\"pyzmq\" />\n            <item index=\"106\" class=\"java.lang.String\" itemvalue=\"sentencepiece\" />\n            <item index=\"107\" class=\"java.lang.String\" itemvalue=\"certifi\" />\n            <item index=\"108\" class=\"java.lang.String\" itemvalue=\"pyserini\" />\n            <item index=\"109\" class=\"java.lang.String\" itemvalue=\"nmslib\" />\n            <item index=\"110\" class=\"java.lang.String\" itemvalue=\"pyparsing\" />\n   
         <item index=\"111\" class=\"java.lang.String\" itemvalue=\"Markdown\" />\n            <item index=\"112\" class=\"java.lang.String\" itemvalue=\"notebook\" />\n            <item index=\"113\" class=\"java.lang.String\" itemvalue=\"xxhash\" />\n            <item index=\"114\" class=\"java.lang.String\" itemvalue=\"tokenizers\" />\n            <item index=\"115\" class=\"java.lang.String\" itemvalue=\"sacremoses\" />\n            <item index=\"116\" class=\"java.lang.String\" itemvalue=\"langdetect\" />\n            <item index=\"117\" class=\"java.lang.String\" itemvalue=\"pyjnius\" />\n            <item index=\"118\" class=\"java.lang.String\" itemvalue=\"kiwisolver\" />\n            <item index=\"119\" class=\"java.lang.String\" itemvalue=\"pathy\" />\n            <item index=\"120\" class=\"java.lang.String\" itemvalue=\"Wikipedia-API\" />\n            <item index=\"121\" class=\"java.lang.String\" itemvalue=\"catalogue\" />\n            <item index=\"122\" class=\"java.lang.String\" itemvalue=\"omegaconf\" />\n            <item index=\"123\" class=\"java.lang.String\" itemvalue=\"fonttools\" />\n            <item index=\"124\" class=\"java.lang.String\" itemvalue=\"pytorch-lightning\" />\n            <item index=\"125\" class=\"java.lang.String\" itemvalue=\"widgetsnbextension\" />\n            <item index=\"126\" class=\"java.lang.String\" itemvalue=\"charset-normalizer\" />\n            <item index=\"127\" class=\"java.lang.String\" itemvalue=\"matplotlib-inline\" />\n            <item index=\"128\" class=\"java.lang.String\" itemvalue=\"async-timeout\" />\n            <item index=\"129\" class=\"java.lang.String\" itemvalue=\"spacy-loggers\" />\n            <item index=\"130\" class=\"java.lang.String\" itemvalue=\"more-itertools\" />\n            <item index=\"131\" class=\"java.lang.String\" itemvalue=\"cloudpickle\" />\n            <item index=\"132\" class=\"java.lang.String\" itemvalue=\"llvmlite\" />\n            <item index=\"133\" 
class=\"java.lang.String\" itemvalue=\"spacy-legacy\" />\n            <item index=\"134\" class=\"java.lang.String\" itemvalue=\"rouge\" />\n            <item index=\"135\" class=\"java.lang.String\" itemvalue=\"importlib-metadata\" />\n            <item index=\"136\" class=\"java.lang.String\" itemvalue=\"Jinja2\" />\n            <item index=\"137\" class=\"java.lang.String\" itemvalue=\"preshed\" />\n            <item index=\"138\" class=\"java.lang.String\" itemvalue=\"onnxruntime\" />\n            <item index=\"139\" class=\"java.lang.String\" itemvalue=\"blis\" />\n            <item index=\"140\" class=\"java.lang.String\" itemvalue=\"urllib3\" />\n            <item index=\"141\" class=\"java.lang.String\" itemvalue=\"Cython\" />\n            <item index=\"142\" class=\"java.lang.String\" itemvalue=\"pptree\" />\n            <item index=\"143\" class=\"java.lang.String\" itemvalue=\"pymongo\" />\n            <item index=\"144\" class=\"java.lang.String\" itemvalue=\"typer\" />\n            <item index=\"145\" class=\"java.lang.String\" itemvalue=\"faiss-cpu\" />\n            <item index=\"146\" class=\"java.lang.String\" itemvalue=\"pytest\" />\n            <item index=\"147\" class=\"java.lang.String\" itemvalue=\"hyperopt\" />\n            <item index=\"148\" class=\"java.lang.String\" itemvalue=\"nbformat\" />\n            <item index=\"149\" class=\"java.lang.String\" itemvalue=\"ipython\" />\n            <item index=\"150\" class=\"java.lang.String\" itemvalue=\"sentence-transformers\" />\n            <item index=\"151\" class=\"java.lang.String\" itemvalue=\"dill\" />\n            <item index=\"152\" class=\"java.lang.String\" itemvalue=\"fastjsonschema\" />\n            <item index=\"153\" class=\"java.lang.String\" itemvalue=\"prometheus-client\" />\n            <item index=\"154\" class=\"java.lang.String\" itemvalue=\"pybind11\" />\n            <item index=\"155\" class=\"java.lang.String\" itemvalue=\"aiohttp\" />\n            <item index=\"156\" 
class=\"java.lang.String\" itemvalue=\"grpcio\" />\n            <item index=\"157\" class=\"java.lang.String\" itemvalue=\"aiosignal\" />\n            <item index=\"158\" class=\"java.lang.String\" itemvalue=\"google-auth\" />\n            <item index=\"159\" class=\"java.lang.String\" itemvalue=\"recbole\" />\n          </list>\n        </value>\n      </option>\n    </inspection_tool>\n  </profile>\n</component>"
  },
  {
    "path": ".idea/inspectionProfiles/profiles_settings.xml",
    "content": "<component name=\"InspectionProjectProfileManager\">\n  <settings>\n    <option name=\"USE_PROJECT_PROFILE\" value=\"false\" />\n    <version value=\"1.0\" />\n  </settings>\n</component>"
  },
  {
    "path": ".idea/misc.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"ProjectRootManager\" version=\"2\" project-jdk-name=\"Python 3.7 (env-test) (2)\" project-jdk-type=\"Python SDK\" />\n</project>"
  },
  {
    "path": ".idea/modules.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"ProjectModuleManager\">\n    <modules>\n      <module fileurl=\"file://$PROJECT_DIR$/.idea/MMRec.iml\" filepath=\"$PROJECT_DIR$/.idea/MMRec.iml\" />\n    </modules>\n  </component>\n</project>"
  },
  {
    "path": ".idea/vcs.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"VcsDirectoryMappings\">\n    <mapping directory=\"$PROJECT_DIR$\" vcs=\"Git\" />\n  </component>\n</project>"
  },
  {
    "path": "LICENSE",
    "content": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU General Public License is a free, copyleft license for\nsoftware and other kinds of works.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nthe GNU General Public License is intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.  We, the Free Software Foundation, use the\nGNU General Public License for most of our software; it applies also to\nany other work released this way by its authors.  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  To protect your rights, we need to prevent others from denying you\nthese rights or asking you to surrender the rights.  Therefore, you have\ncertain responsibilities if you distribute copies of the software, or if\nyou modify it: responsibilities to respect the freedom of others.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must pass on to the recipients the same\nfreedoms that you received.  You must make sure that they, too, receive\nor can get the source code.  
And you must show them these terms so they\nknow their rights.\n\n  Developers that use the GNU GPL protect your rights with two steps:\n(1) assert copyright on the software, and (2) offer you this License\ngiving you legal permission to copy, distribute and/or modify it.\n\n  For the developers' and authors' protection, the GPL clearly explains\nthat there is no warranty for this free software.  For both users' and\nauthors' sake, the GPL requires that modified versions be marked as\nchanged, so that their problems will not be attributed erroneously to\nauthors of previous versions.\n\n  Some devices are designed to deny users access to install or run\nmodified versions of the software inside them, although the manufacturer\ncan do so.  This is fundamentally incompatible with the aim of\nprotecting users' freedom to change the software.  The systematic\npattern of such abuse occurs in the area of products for individuals to\nuse, which is precisely where it is most unacceptable.  Therefore, we\nhave designed this version of the GPL to prohibit the practice for those\nproducts.  If such problems arise substantially in other domains, we\nstand ready to extend this provision to those domains in future versions\nof the GPL, as needed to protect the freedom of users.\n\n  Finally, every program is threatened constantly by software patents.\nStates should not allow patents to restrict development and use of\nsoftware on general-purpose computers, but in those that do, we wish to\navoid the special danger that patents applied to a free program could\nmake it effectively proprietary.  To prevent this, the GPL assures that\npatents cannot be used to render the program non-free.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. 
Definitions.\n\n  \"This License\" refers to version 3 of the GNU General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. 
Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. 
Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  
This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. 
Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  
If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  
A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  
Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  
You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  
If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  
If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  
For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  
To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Use with the GNU Affero General Public License.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU Affero General Public License into a single\ncombined work, and to convey the resulting work.  
The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the special requirements of the GNU Affero General Public License,\nsection 13, concerning interaction through a network will apply to the\ncombination as such.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  
It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License\n    along with this program.  If not, see <https://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n    <program>  Copyright (C) <year>  <name of author>\n    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  
Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<https://www.gnu.org/licenses/>.\n\n  The GNU General Public License does not permit incorporating your program\ninto proprietary programs.  If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.  But first, please read\n<https://www.gnu.org/licenses/why-not-lgpl.html>.\n"
  },
  {
    "path": "README.md",
    "content": "# MMRec\n\n<div align=\"center\">\n  <a href=\"https://github.com/enoche/MultimodalRecSys\"><img width=\"300px\" height=\"auto\" src=\"https://github.com/enoche/MMRec/blob/master/images/logo.png\"></a>\n</div>\n\n\n$\\text{MMRec}$: A modern <ins>M</ins>ulti<ins>M</ins>odal <ins>Rec</ins>ommendation toolbox that simplifies your research [arXiv](https://arxiv.org/abs/2302.03497).  \n:point_right: Check our [comprehensive survey on MMRec, arXiv](https://arxiv.org/abs/2302.04473).   \n:point_right: Check the awesome [multimodal recommendation resources](https://github.com/enoche/MultimodalRecSys).  \n\n## Toolbox\n<p>\n<img src=\"./images/MMRec.png\" width=\"500\">\n</p>\n\n## Supported Models\nsource code at: `src\\models`\n\n| **Model**       | **Paper**                                                                                             | **Conference/Journal** | **Code**    |\n|------------------|--------------------------------------------------------------------------------------------------------|------------------------|-------------|\n| **General models**  |                                                                                                        |                        |             |\n| SelfCF              | [SelfCF: A Simple Framework for Self-supervised Collaborative Filtering](https://arxiv.org/abs/2107.03019)                                 | ACM TORS'23            | selfcfed_lgn.py  |\n| LayerGCN            | [Layer-refined Graph Convolutional Networks for Recommendation](https://arxiv.org/abs/2207.11088)                                          | ICDE'23                | layergcn.py  |\n| **Multimodal models**  |                                                                                                        |                        |             |\n| VBPR              | [VBPR: Visual Bayesian Personalized Ranking from Implicit Feedback](https://arxiv.org/abs/1510.01784)                                         
     | AAAI'16                 | vbpr.py      |\n| MMGCN             | [MMGCN: Multi-modal Graph Convolution Network for Personalized Recommendation of Micro-video](https://staff.ustc.edu.cn/~hexn/papers/mm19-MMGCN.pdf)               | MM'19                  | mmgcn.py  |\n| ItemKNNCBF             | [Are We Really Making Much Progress? A Worrying Analysis of Recent Neural Recommendation Approaches](https://arxiv.org/abs/1907.06902)               | RecSys'19              | itemknncbf.py  |\n| GRCN              | [Graph-Refined Convolutional Network for Multimedia Recommendation with Implicit Feedback](https://arxiv.org/abs/2111.02036)            | MM'20                  | grcn.py    |\n| MVGAE             | [Multi-Modal Variational Graph Auto-Encoder for Recommendation Systems](https://ieeexplore.ieee.org/abstract/document/9535249)              | TMM'21                 | mvgae.py   |\n| DualGNN           | [DualGNN: Dual Graph Neural Network for Multimedia Recommendation](https://ieeexplore.ieee.org/abstract/document/9662655)                   | TMM'21                 | dualgnn.py   |\n| LATTICE           | [Mining Latent Structures for Multimedia Recommendation](https://arxiv.org/abs/2104.09036)                                               | MM'21                  | lattice.py  |\n| SLMRec            | [Self-supervised Learning for Multimedia Recommendation](https://ieeexplore.ieee.org/document/9811387) | TMM'22                 |                  slmrec.py |\n| **Newly added**  |                                                                                                        |                        |             |\n| BM3         | [Bootstrap Latent Representations for Multi-modal Recommendation](https://dl.acm.org/doi/10.1145/3543507.3583251)                                          | WWW'23                 | bm3.py |\n| FREEDOM | [A Tale of Two Graphs: Freezing and Denoising Graph Structures for Multimodal Recommendation](https://arxiv.org/abs/2211.06924)  
                               | MM'23                  | freedom.py  |\n| MGCN     | [Multi-View Graph Convolutional Network for Multimedia Recommendation](https://arxiv.org/abs/2308.03588)                       | MM'23               | mgcn.py          |\n| DRAGON  | [Enhancing Dyadic Relations with Homogeneous Graphs for Multimodal Recommendation](https://arxiv.org/abs/2301.12097)                                 | ECAI'23                | dragon.py  |\n| MG  | [Mirror Gradient: Towards Robust Multimodal Recommender Systems via Exploring Flat Local Minima](https://arxiv.org/abs/2402.11262)                                 | WWW'24                | common/trainer.py  |\n| LGMRec  | [LGMRec: Local and Global Graph Learning for Multimodal Recommendation](https://arxiv.org/abs/2312.16400)                                 | AAAI'24                | lgmrec.py |\n| DA-MRS  | [Improving Multi-modal Recommender Systems by Denoising and Aligning Multi-modal Content and User Feedback](https://dl.acm.org/doi/10.1145/3637528.3671703)                   | KDD'24                | damrs.py |\n| SMORE   | [Spectrum-based Modality Representation Fusion Graph Convolutional Network for Multimodal Recommendation](https://arxiv.org/abs/2412.14978) | WSDM'25           | smore.py  |\n| PGL | [Mind Individual Information! 
Principal Graph Learning for Multimedia Recommendation](https://ojs.aaai.org/index.php/AAAI/article/view/33429) | AAAI'25 | pgl.py |\n\n\n#### Please consider to cite our paper if this framework helps you, thanks:\n```\n@inproceedings{zhou2023bootstrap,\nauthor = {Zhou, Xin and Zhou, Hongyu and Liu, Yong and Zeng, Zhiwei and Miao, Chunyan and Wang, Pengwei and You, Yuan and Jiang, Feijun},\ntitle = {Bootstrap Latent Representations for Multi-Modal Recommendation},\nbooktitle = {Proceedings of the ACM Web Conference 2023},\npages = {845–854},\nyear = {2023}\n}\n\n@article{zhou2023comprehensive,\n      title={A Comprehensive Survey on Multimodal Recommender Systems: Taxonomy, Evaluation, and Future Directions}, \n      author={Hongyu Zhou and Xin Zhou and Zhiwei Zeng and Lingzi Zhang and Zhiqi Shen},\n      year={2023},\n      journal={arXiv preprint arXiv:2302.04473},\n}\n\n@inproceedings{zhou2023mmrec,\n  title={Mmrec: Simplifying multimodal recommendation},\n  author={Zhou, Xin},\n  booktitle={Proceedings of the 5th ACM International Conference on Multimedia in Asia Workshops},\n  pages={1--2},\n  year={2023}\n}\n```\n"
  },
  {
    "path": "data/README.md",
    "content": "\n## Data\nDownload from Google Drive: [Baby/Sports/Elec](https://drive.google.com/drive/folders/13cBy1EA_saTUuXxVllKgtfci2A09jyaG?usp=sharing)\nThe data already contains text and image features extracted from Sentence-Transformers and CNN.\n\nAn alternative dataset for short-video recommendations: [MicroLens](https://drive.google.com/drive/folders/14UyTAh_YyDV8vzXteBJiy9jv8TBDK43w?usp=drive_link).\nThanks to @yxni98!\n\n* Please move your downloaded data into this dir for model training.\n"
  },
  {
    "path": "evaluation/README.md",
    "content": "# EVALUATING THE SOTA MODELS\n\nWe validate the effectiveness and efficiency of state-of-the-art multimodal recommendation models by conducting extensive experiments on four public datasets. Furthermore, we investigate the principal determinants of model performance, including the impact of different modality information and data split methods.\n\n## Statistics of the evaluated datasets.\n| Datasets | # Users | # Items | # Interactions |Sparsity|\n|----------|--------|---------|---------|---------|\n| Baby     | 19,445     | 7,050     |160,792|99.8827%|\n| Sports   | 35,598      | 18,357   |296,337|99.9547%|\n| FoodRec     | 61,668      | 21,874    |1,654,456|99.8774%|\n| Elec     | 192,403      | 63,001     |1,689,188|99.9861%|\n\n\n## Experimental Results\n| Dataset                 | Model    | Recall@10          | Recall@20          | Recall@50          | NDCG@10            | NDCG@20            | NDCG@50            |\n|-------------------------|----------|--------------------|--------------------|--------------------|--------------------|--------------------|--------------------|\n| **Baby**   | BPR      | 0.0357             | 0.0575             | 0.1054             | 0.0192             | 0.0249             | 0.0345             |\n|                         | LightGCN | 0.0479             | 0.0754             | 0.1333             | 0.0257             | 0.0328             | 0.0445             |\n|                         | VBPR     | 0.0423             | 0.0663             | 0.1212             | 0.0223             | 0.0284             | 0.0396             |\n|                         | MMGCN    | 0.0378             | 0.0615             | 0.1100             | 0.0200             | 0.0261             | 0.0359             |\n|                         | DualGNN  | 0.0448             | 0.0716             | 0.1288             | 0.0240             | 0.0309             | 0.0424             |\n|                         | GRCN     | 0.0539             | 
0.0833             | 0.1464             | 0.0288             | 0.0363             | 0.0490             |\n|                         | LATTICE  | 0.0547             | 0.0850             | 0.1477             | 0.0292             | 0.0370             | 0.0497             |\n|                         | BM3      | 0.0564             | 0.0883             | 0.1477             | 0.0301             | 0.0383             | 0.0502             |\n|                         | SLMRec   | 0.0529             | 0.0775             | 0.1252             | 0.0290             | 0.0353             | 0.0450             |\n|                         | ADDVAE   | _0.0598_ | _0.091_  | _0.1508_ | _0.0323_ | _0.0404_ | _0.0525_ |\n|                         | FREEDOM  | **0.0627**    | **0.0992**    | **0.1655**    | **0.0330**    | **0.0424**    | **0.0558**    |\n| **Sports**  | BPR      | 0.0432             | 0.0653             | 0.1083             | 0.0241             | 0.0298             | 0.0385             |\n|                         | LightGCN | 0.0569             | 0.0864             | 0.1414             | 0.0311             | 0.0387             | 0.0498             |\n|                         | VBPR     | 0.0558             | 0.0856             | 0.1391             | 0.0307             | 0.0384             | 0.0492             |\n|                         | MMGCN    | 0.0370             | 0.0605             | 0.1078             | 0.0193             | 0.0254             | 0.0350             |\n|                         | DualGNN  | 0.0568             | 0.0859             | 0.1392             | 0.0310             | 0.0385             | 0.0493             |\n|                         | GRCN     | 0.0598             | 0.0915             | 0.1509             | 0.0332             | 0.0414             | 0.0535             |\n|                         | LATTICE  | 0.0620             | 0.0953             | 0.1561             | 0.0335             | 0.0421             | 0.0544             |\n|   
                      | BM3      | 0.0656             | 0.0980             | 0.1581             | 0.0355             | 0.0438             | 0.0561             |\n|                         | SLMRec   | 0.0663             | 0.0990             | 0.1543             | 0.0365             | 0.0450             | 0.0562             |\n|                         | ADDVAE   | _0.0709_ | _0.1035_ | _0.1663_ | _0.0389_    | _0.0473_ | _0.0600_ |\n|                         | FREEDOM  | **0.0717**    | **0.1089**    | **0.1768**    | **0.0385** | **0.0481**    | **0.0618**    |\n| **FoodRec** | BPR      | 0.0303             | 0.0511             | 0.0948             | 0.0188             | 0.0250             | 0.0356             |\n|                         | LightGCN | 0.0331             | 0.0546             | 0.1003             | 0.0210             | 0.0274             | 0.0386             |\n|                         | VBPR     | 0.0306             | 0.0516             | 0.0972             | 0.0191             | 0.0254             | 0.0365             |\n|                         | MMGCN    | 0.0307             | 0.0510             | 0.0943             | 0.0192             | 0.0253             | 0.0359             |\n|                         | DualGNN  | _0.0338_ | 0.0559             | _0.1027_ | _0.0214_ | _0.0280_ | _0.0394_ |\n|                         | GRCN     | **0.0356**   | **0.0578**    | **0.1063**    | **0.0226**    | **0.0295**    | **0.0411**    |\n|                         | LATTICE  | 0.0336             | _0.0560_| 0.1012             | 0.0211             | 0.0277             | 0.0388             |\n|                         | BM3      | 0.0334             | 0.0553             | 0.0994             | 0.0208             | 0.0274             | 0.0381             |\n|                         | SLMRec   | 0.0323             | 0.0515             | 0.0907             | 0.0208             | 0.0266             | 0.0362             |\n|                         | ADDVAE   | 
0.0309             | 0.0508             | 0.093              | 0.0186             | 0.0247             | 0.035              |\n|                         | FREEDOM  | 0.0333             | 0.0556             | 0.1009             | 0.0212             | 0.0279             | 0.0389             |\n| **Elec**    | BPR      | 0.0235             | 0.0367             | 0.0621             | 0.0127             | 0.0161             | 0.0212             |\n|                         | LightGCN | 0.0363             | 0.0540             | 0.0879             | 0.0204             | 0.0250             | 0.0318             |\n|                         | VBPR     | 0.0293             | 0.0458             | 0.0778             | 0.0159             | 0.0202             | 0.0267             |\n|                         | MMGCN    | 0.0213             | 0.0343             | 0.0610             | 0.0112             | 0.0146             | 0.0200             |\n|                         | DualGNN  | 0.0365             | 0.0542             | 0.0875             | 0.0206             | 0.0252             | 0.0319             |\n|                         | GRCN     | 0.0389             | 0.0590             | 0.0970             | 0.0216             | 0.0268             | 0.0345             |\n|                         | LATTICE  | -                  | -                  | -                  | -                  | -                  | -                  |\n|                         | BM3      | 0.0437             | 0.0648             | 0.1021             | 0.0247             | 0.0302             | 0.0378             |\n|                         | SLMRec   | _0.0443_ | _0.0651_ | _0.1038_ | _0.0249_ | _0.0303_ | _0.0382_ |\n|                         | ADDVAE   | **0.0451**    | **0.0665**    | **0.1066**    | **0.0253**    | **0.0308**    | **0.0390**    |\n|                         | FREEDOM  | 0.0396             | 0.0601             | 0.0998             | 0.0220             | 0.0273             | 
0.0353             |\n\n### Ablation Study\n\n#### Recommendation performance comparison using different data split methods:\n\nWe evaluate the performance of various recommendation models using different data splitting methods. The offline evaluation is based on the historical item ratings or the implicit item feedback. As this method relies on the user-item interactions and the models are all learning based on the supervised signals, we need to split the interactions into train, validation and test sets. There are three main split strategies that we applied to compare the performance:\n\n• Random split: As the name suggests, this split strategy randomly selects the train and test boundary for each user, splitting the interactions according to the given ratio. The disadvantage of the random splitting strategy is that the results are not reproducible unless the authors publish how the data was split, and it is not a realistic scenario because it does not consider time.\n\n• User time split: The temporal split strategy splits the historical interactions based on the interaction timestamp by the ratio (e.g., train:validation:test=8:1:1). It splits the last percentage of interactions the user made as the test set. Although it considers the timestamp, it is still not a realistic scenario because it still splits the train/test sets among all the interactions one user made without considering the global time.\n\n• Global time split: The global time splitting strategy fixes the time point shared by all users according to the splitting ratio. The interactions after the last time point are split as the test set. Additionally, the users of the interactions after the global temporal boundary must be in the training set, which follows the most realistic and strict settings. 
The limitation of this strategy is that the number of users will be reduced due to the reason that the users not existing in the training set will be deleted\n\nOur experiments on the Sports dataset, using these three splitting strategies, provide insights into their impact on recommendation performance. The table below presents the performance comparison results in terms of Recall@k and NDCG@k where k=10,20, and the second table shows the performance ranking of models based on Recall@20 and NDCG@20.\n\n| Dataset | Model    |          | Recall@10 |             |          | Recall@20 |             |\n|---------|----------|----------|-----------|-------------|----------|-----------|-------------|\n|         |          | Random   | User Time | Global Time | Random   | User Time | Global Time |\n|         | MMGCN    | 0.0384   | 0.0266    | 0.0140      | 0.0611   | 0.0446    | 0.0245      |\n|         | BPR      | 0.0444   | 0.0322    | 0.0152      | 0.0663   | 0.0509    | 0.0258      |\n|         | VBPR     | 0.0563   | 0.0385    | 0.0176      | 0.0851   | 0.0620    | 0.0298      |\n|         | DualGNN  | 0.0576   | 0.0403    | 0.0181      | 0.0859   | 0.0611    | 0.0297      |\n| sports  | GRCN     | 0.0604   | 0.0418    | 0.0167      | 0.0915   | 0.0666    | 0.0286      |\n|         | LightGCN | 0.0568   | 0.0405    | 0.0205      | 0.0863   | 0.0663    | 0.0336      |\n|         | LATTICE  | 0.0641   | 0.0450    | 0.0207      | 0.0964   | 0.0699    | 0.0337      |\n|         | BM3      | 0.0646   | 0.0447    | 0.0213      | 0.0955   | 0.0724    | 0.0336      |\n|         | SLMRec   | 0.0651   | 0.0470    | 0.0220      | 0.0985   | 0.0733    | 0.0350      |\n|         | FREEDOM  | 0.0708   | 0.0490    | 0.0226      | 0.1080   | 0.0782    | 0.0372      |\n| Dataset | Model    |          | NDCG@10   |             |          | NDCG@20   |             |\n|         |          | Random   | User Time | Global Time | Random   | User Time | Global Time |\n|         | MMGCN    
| 0.0202   | 0.0134    | 0.0091      | 0.0261   | 0.0180    | 0.0125      |\n|         | BPR      | 0.0245   | 0.0169    | 0.0102      | 0.0302   | 0.0218    | 0.0135      |\n|         | VBPR     | 0.0304   | 0.0204    | 0.0115      | 0.0378   | 0.0265    | 0.0153      |\n|         | DualGNN  | 0.0321   | 0.0214    | 0.0118      | 0.0394   | 0.0268    | 0.0155      |\n| sports  | GRCN     | 0.0332   | 0.0219    | 0.0101      | 0.0412   | 0.0282    | 0.0138      |\n|         | LightGCN | 0.0315   | 0.0220    | 0.0139      | 0.0391   | 0.0286    | 0.0180      |\n|         | LATTICE  | 0.0351   | 0.0238    | 0.0138      | 0.0434   | 0.0302    | 0.0177      |\n|         | BM3      | 0.0356   | 0.0237    | 0.0144      | 0.0436   | 0.0308    | 0.0182      |\n|         | SLMRec   | 0.0364   | 0.0253    | 0.0148      | 0.0450   | 0.0321    | 0.0189      |\n|         | FREEDOM  | 0.0388   | 0.0255    | 0.0151      | 0.0485   | 0.0330    | 0.0197      |\n\nAs demonstrated above, different data splitting strategies lead to varied performance outcomes for the same dataset and evaluation metrics. 
This variability presents a challenge in comparing the effectiveness of different models when they are based on different data split strategies.\n\n|  Model   |        | Sports, NDCG@20   |             |\n|----------|--------|-------------------|-------------|\n|          | Random | User Time         | Global Time |\n| MMGCN    | 10     | 10                | 10          |\n| BPR      | 9      | 9                 | 8↑1         |\n| VBPR     | 8      | 8                 | 7↑1         |\n| LightGCN | 7      | 5↑2               | 4↑3         |\n| DualGNN  | 6      | 7↓1               | 6           |\n| GRCN     | 5      | 6↓1               | 9↓4         |\n| LATTICE  | 4      | 4                 | 5↓1         |\n| BM3      | 3      | 3                 | 3           |\n| SLMRec   | 2      | 2                 | 2           |\n| FREEDOM  | 1      | 1                 | 1           |\n| **Model**    |        | **Sports, Recall@20** |             |\n|          | Random | User Time         | Global Time |\n| MMGCN    | 10     | 10                | 10          |\n| BPR      | 9      | 9                 | 9           |\n| VBPR     | 8      | 7↑1               | 6↑2         |\n| DualGNN  | 7      | 8↓1               | 7           |\n| LightGCN | 6      | 6                 | 5↑1         |\n| GRCN     | 5      | 5                 | 8↓3         |\n| BM3      | 4      | 3↑1               | 4           |\n| LATTICE  | 3      | 4↓1               | 3           |\n| SLMRec   | 2      | 2                 | 2           |\n| FREEDOM  | 1      | 1                 | 1           |\n\nThe above table reports the ranks of SOTA models under each splitting strategy. The rows are sorted by the performance of models under random splitting strategy, with the up and down arrows indicating the relative rank position swaps compared with random splitting. 
As we can see, the ranking swaps are observed between the models under different splitting strategies.\n\n#### Recommendation performance comparison using Different Modalities\nWe are interested in how the modality information benefits the recommendation, and which modality contributes more. We aim to understand the specific benefits of different modalities in recommendation systems and provide guidelines for researchers on selecting appropriate modalities. We evaluate it by feeding the single modality information, and compare the performance between using both modalities and the single modality. \n\nThe following figure is based on Recall@20 to show the summary and tendency of other modalities, visually summarizing the impact of different modalities on various models. The orange point represents the performance of multi-modality, the green one represents the performance of textual modality and the blue point is for visual modality. The specific numerical values will be shown in our GitHub.\n\n\n<img src=\"https://github.com/hongyurain/Recommendation-with-modality-information/blob/main/IMG/modality-baby.jpg\" alt=\"image-1\" height=\"50%\" width=\"50%\" /><img src=\"https://github.com/hongyurain/Recommendation-with-modality-information/blob/main/IMG/modality-sports.jpg\" alt=\"image-2\" height=\"50%\" width=\"50%\" />\n\n## Please consider citing our paper if these results help you, thanks:\n```\n\n"
  },
  {
    "path": "preprocessing/0rating2inter.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"# 从ratings_Sports_and_Outdoors.csv文件中提取U-I交互图, 5-core后重新编号\\n\",\n    \"- Extracting U-I interactions and performing 5-core, re-indexing\\n\",\n    \"- dataset located at: http://jmcauley.ucsd.edu/data/amazon/links.html, rating only file in \\\"Small\\\" subsets for experimentation\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 1,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import os, csv\\n\",\n    \"import pandas as pd\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"outputs\": [],\n   \"source\": [\n    \"os.chdir('/home/enoche/MMRec/Sports14')\\n\",\n    \"os.getcwd()\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\",\n     \"is_executing\": true\n    }\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"## 先5-core过滤\\n\",\n    \"## 5-core filtering\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 3,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"shape: (3268695, 4)\\n\"\n     ]\n    },\n    {\n     \"data\": {\n      \"text/plain\": \"           userID      itemID  rating   timestamp\\n0  A3PMSRCL80KSA1  0000031852     4.0  1388275200\\n1  A1SNLWGLFXD70K  0000031852     4.0  1392940800\\n2  A1KJ4CVG87QW09  0000031852     4.0  1389657600\\n3    AA9ITO6ZLZW6  0000031852     5.0  1399507200\\n4    APJ5ULJ1RMZ4  0000031852     1.0  1398556800\",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n       
 text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>userID</th>\\n      <th>itemID</th>\\n      <th>rating</th>\\n      <th>timestamp</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>0</th>\\n      <td>A3PMSRCL80KSA1</td>\\n      <td>0000031852</td>\\n      <td>4.0</td>\\n      <td>1388275200</td>\\n    </tr>\\n    <tr>\\n      <th>1</th>\\n      <td>A1SNLWGLFXD70K</td>\\n      <td>0000031852</td>\\n      <td>4.0</td>\\n      <td>1392940800</td>\\n    </tr>\\n    <tr>\\n      <th>2</th>\\n      <td>A1KJ4CVG87QW09</td>\\n      <td>0000031852</td>\\n      <td>4.0</td>\\n      <td>1389657600</td>\\n    </tr>\\n    <tr>\\n      <th>3</th>\\n      <td>AA9ITO6ZLZW6</td>\\n      <td>0000031852</td>\\n      <td>5.0</td>\\n      <td>1399507200</td>\\n    </tr>\\n    <tr>\\n      <th>4</th>\\n      <td>APJ5ULJ1RMZ4</td>\\n      <td>0000031852</td>\\n      <td>1.0</td>\\n      <td>1398556800</td>\\n    </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 3,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"df = pd.read_csv('ratings_Sports_and_Outdoors.csv', names=['userID', 'itemID', 'rating', 'timestamp'], header=None)\\n\",\n    \"print(f'shape: {df.shape}')\\n\",\n    \"df[:5]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 4,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"After dropped: (3268695, 4)\\n\"\n     ]\n    },\n    {\n     \"data\": {\n      \"text/plain\": \"           userID      itemID  rating   timestamp\\n0  A3PMSRCL80KSA1  0000031852     4.0  1388275200\\n1  A1SNLWGLFXD70K  0000031852     4.0  1392940800\\n2  A1KJ4CVG87QW09  0000031852     4.0 
 1389657600\",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>userID</th>\\n      <th>itemID</th>\\n      <th>rating</th>\\n      <th>timestamp</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>0</th>\\n      <td>A3PMSRCL80KSA1</td>\\n      <td>0000031852</td>\\n      <td>4.0</td>\\n      <td>1388275200</td>\\n    </tr>\\n    <tr>\\n      <th>1</th>\\n      <td>A1SNLWGLFXD70K</td>\\n      <td>0000031852</td>\\n      <td>4.0</td>\\n      <td>1392940800</td>\\n    </tr>\\n    <tr>\\n      <th>2</th>\\n      <td>A1KJ4CVG87QW09</td>\\n      <td>0000031852</td>\\n      <td>4.0</td>\\n      <td>1389657600</td>\\n    </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 4,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"k_core = 5\\n\",\n    \"learner_id, course_id, tmstmp_str = 'userID', 'itemID', 'timestamp'\\n\",\n    \"\\n\",\n    \"df.dropna(subset=[learner_id, course_id, tmstmp_str], inplace=True)\\n\",\n    \"df.drop_duplicates(subset=[learner_id, course_id, tmstmp_str], inplace=True)\\n\",\n    \"print(f'After dropped: {df.shape}')\\n\",\n    \"df[:3]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 5,\n   \"outputs\": [],\n   \"source\": [\n    \"from collections import Counter\\n\",\n    \"import numpy as np\\n\",\n    \"\\n\",\n    \"min_u_num, min_i_num = 5, 5\\n\",\n    \"\\n\",\n    \"def get_illegal_ids_by_inter_num(df, field, max_num=None, min_num=None):\\n\",\n    
\"    if field is None:\\n\",\n    \"        return set()\\n\",\n    \"    if max_num is None and min_num is None:\\n\",\n    \"        return set()\\n\",\n    \"\\n\",\n    \"    max_num = max_num or np.inf\\n\",\n    \"    min_num = min_num or -1\\n\",\n    \"\\n\",\n    \"    ids = df[field].values\\n\",\n    \"    inter_num = Counter(ids)\\n\",\n    \"    ids = {id_ for id_ in inter_num if inter_num[id_] < min_num or inter_num[id_] > max_num}\\n\",\n    \"    print(f'{len(ids)} illegal_ids_by_inter_num, field={field}')\\n\",\n    \"\\n\",\n    \"    return ids\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"def filter_by_k_core(df):\\n\",\n    \"    while True:\\n\",\n    \"        ban_users = get_illegal_ids_by_inter_num(df, field=learner_id, max_num=None, min_num=min_u_num)\\n\",\n    \"        ban_items = get_illegal_ids_by_inter_num(df, field=course_id, max_num=None, min_num=min_i_num)\\n\",\n    \"        if len(ban_users) == 0 and len(ban_items) == 0:\\n\",\n    \"            return\\n\",\n    \"\\n\",\n    \"        dropped_inter = pd.Series(False, index=df.index)\\n\",\n    \"        if learner_id:\\n\",\n    \"            dropped_inter |= df[learner_id].isin(ban_users)\\n\",\n    \"        if course_id:\\n\",\n    \"            dropped_inter |= df[course_id].isin(ban_items)\\n\",\n    \"        print(f'{len(dropped_inter)} dropped interactions')\\n\",\n    \"        df.drop(df.index[dropped_inter], inplace=True)\\n\",\n    \"\\n\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"## k-core\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 6,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"1906153 illegal_ids_by_inter_num, field=userID\\n\",\n      \"376127 illegal_ids_by_inter_num, 
field=itemID\\n\",\n      \"3268695 dropped interactions\\n\",\n      \"22213 illegal_ids_by_inter_num, field=userID\\n\",\n      \"54919 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"589029 dropped interactions\\n\",\n      \"18323 illegal_ids_by_inter_num, field=userID\\n\",\n      \"3743 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"422478 dropped interactions\\n\",\n      \"2298 illegal_ids_by_inter_num, field=userID\\n\",\n      \"4388 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"349749 dropped interactions\\n\",\n      \"3331 illegal_ids_by_inter_num, field=userID\\n\",\n      \"639 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"326238 dropped interactions\\n\",\n      \"579 illegal_ids_by_inter_num, field=userID\\n\",\n      \"1012 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"311188 dropped interactions\\n\",\n      \"897 illegal_ids_by_inter_num, field=userID\\n\",\n      \"169 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"305054 dropped interactions\\n\",\n      \"155 illegal_ids_by_inter_num, field=userID\\n\",\n      \"308 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"300866 dropped interactions\\n\",\n      \"301 illegal_ids_by_inter_num, field=userID\\n\",\n      \"47 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"299031 dropped interactions\\n\",\n      \"50 illegal_ids_by_inter_num, field=userID\\n\",\n      \"79 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"297646 dropped interactions\\n\",\n      \"87 illegal_ids_by_inter_num, field=userID\\n\",\n      \"11 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"297132 dropped interactions\\n\",\n      \"16 illegal_ids_by_inter_num, field=userID\\n\",\n      \"24 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"296741 dropped interactions\\n\",\n      \"24 illegal_ids_by_inter_num, field=userID\\n\",\n      \"1 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"296581 dropped interactions\\n\",\n      \"1 
illegal_ids_by_inter_num, field=userID\\n\",\n      \"8 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"296481 dropped interactions\\n\",\n      \"8 illegal_ids_by_inter_num, field=userID\\n\",\n      \"0 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"296445 dropped interactions\\n\",\n      \"0 illegal_ids_by_inter_num, field=userID\\n\",\n      \"5 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"296413 dropped interactions\\n\",\n      \"5 illegal_ids_by_inter_num, field=userID\\n\",\n      \"0 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"296393 dropped interactions\\n\",\n      \"0 illegal_ids_by_inter_num, field=userID\\n\",\n      \"3 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"296373 dropped interactions\\n\",\n      \"4 illegal_ids_by_inter_num, field=userID\\n\",\n      \"0 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"296361 dropped interactions\\n\",\n      \"0 illegal_ids_by_inter_num, field=userID\\n\",\n      \"1 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"296345 dropped interactions\\n\",\n      \"1 illegal_ids_by_inter_num, field=userID\\n\",\n      \"0 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"296341 dropped interactions\\n\",\n      \"0 illegal_ids_by_inter_num, field=userID\\n\",\n      \"0 illegal_ids_by_inter_num, field=itemID\\n\",\n      \"k-core shape: (296337, 4)\\n\",\n      \"shape after k-core: (296337, 4)\\n\"\n     ]\n    },\n    {\n     \"data\": {\n      \"text/plain\": \"             userID      itemID  rating   timestamp\\n564    AIXZKN4ACSKI  1881509818     5.0  1390694400\\n565  A1L5P841VIO02V  1881509818     5.0  1328140800\",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  
<thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>userID</th>\\n      <th>itemID</th>\\n      <th>rating</th>\\n      <th>timestamp</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>564</th>\\n      <td>AIXZKN4ACSKI</td>\\n      <td>1881509818</td>\\n      <td>5.0</td>\\n      <td>1390694400</td>\\n    </tr>\\n    <tr>\\n      <th>565</th>\\n      <td>A1L5P841VIO02V</td>\\n      <td>1881509818</td>\\n      <td>5.0</td>\\n      <td>1328140800</td>\\n    </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 6,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"filter_by_k_core(df)\\n\",\n    \"print(f'k-core shape: {df.shape}')\\n\",\n    \"print(f'shape after k-core: {df.shape}')\\n\",\n    \"df[:2]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"## Re-index\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 7,\n   \"outputs\": [],\n   \"source\": [\n    \"df.reset_index(drop=True, inplace=True)\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 8,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"mapping dumped...\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"\\n\",\n    \"i_mapping_file = 'i_id_mapping.csv'\\n\",\n    \"u_mapping_file = 'u_id_mapping.csv'\\n\",\n    \"\\n\",\n    \"splitting = [0.8, 0.1, 0.1]\\n\",\n    \"uid_field, iid_field = learner_id, course_id\\n\",\n    \"\\n\",\n    \"uni_users = pd.unique(df[uid_field])\\n\",\n    \"uni_items = pd.unique(df[iid_field])\\n\",\n    \"\\n\",\n    \"# start from 0\\n\",\n    
\"u_id_map = {k: i for i, k in enumerate(uni_users)}\\n\",\n    \"i_id_map = {k: i for i, k in enumerate(uni_items)}\\n\",\n    \"\\n\",\n    \"df[uid_field] = df[uid_field].map(u_id_map)\\n\",\n    \"df[iid_field] = df[iid_field].map(i_id_map)\\n\",\n    \"df[uid_field] = df[uid_field].astype(int)\\n\",\n    \"df[iid_field] = df[iid_field].astype(int)\\n\",\n    \"\\n\",\n    \"# dump\\n\",\n    \"rslt_dir = './'\\n\",\n    \"u_df = pd.DataFrame(list(u_id_map.items()), columns=['user_id', 'userID'])\\n\",\n    \"i_df = pd.DataFrame(list(i_id_map.items()), columns=['asin', 'itemID'])\\n\",\n    \"\\n\",\n    \"u_df.to_csv(os.path.join(rslt_dir, u_mapping_file), sep='\\\\t', index=False)\\n\",\n    \"i_df.to_csv(os.path.join(rslt_dir, i_mapping_file), sep='\\\\t', index=False)\\n\",\n    \"print(f'mapping dumped...')\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"outputs\": [],\n   \"source\": [\n    \"\\n\",\n    \"# =========2. 
splitting\\n\",\n    \"print(f'splitting ...')\\n\",\n    \"tot_ratio = sum(splitting)\\n\",\n    \"# remove 0.0 in ratios\\n\",\n    \"ratios = [i for i in splitting if i > .0]\\n\",\n    \"ratios = [_ / tot_ratio for _ in ratios]\\n\",\n    \"split_ratios = np.cumsum(ratios)[:-1]\\n\",\n    \"\\n\",\n    \"#df[tmstmp_str] = df[tmstmp_str].map(lambda x: datetime.strptime(x, \\\"%Y-%m-%dT%H:%M:%SZ\\\"))\\n\",\n    \"split_ratios\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\",\n     \"is_executing\": true\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 10,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"columns: Index(['userID', 'itemID', 'rating', 'timestamp', 'x_label'], dtype='object')\\n\"\n     ]\n    },\n    {\n     \"data\": {\n      \"text/plain\": \"   userID  itemID  rating   timestamp  x_label\\n1       1       0     5.0  1328140800        0\\n2       2       0     4.0  1330387200        0\\n3       3       0     4.0  1328400000        0\\n4       4       0     4.0  1366675200        0\\n5       5       0     5.0  1351814400        0\",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>userID</th>\\n      <th>itemID</th>\\n      <th>rating</th>\\n      <th>timestamp</th>\\n      <th>x_label</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>1</th>\\n      <td>1</td>\\n      <td>0</td>\\n      <td>5.0</td>\\n      <td>1328140800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>2</th>\\n      <td>2</td>\\n      
<td>0</td>\\n      <td>4.0</td>\\n      <td>1330387200</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>3</th>\\n      <td>3</td>\\n      <td>0</td>\\n      <td>4.0</td>\\n      <td>1328400000</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>4</th>\\n      <td>4</td>\\n      <td>0</td>\\n      <td>4.0</td>\\n      <td>1366675200</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>5</th>\\n      <td>5</td>\\n      <td>0</td>\\n      <td>5.0</td>\\n      <td>1351814400</td>\\n      <td>0</td>\\n    </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 10,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"ts_id = 'timestamp'\\n\",\n    \"\\n\",\n    \"split_timestamps = list(np.quantile(df[ts_id], split_ratios))\\n\",\n    \"# get df training dataset unique users/items\\n\",\n    \"df_train = df.loc[df[ts_id] < split_timestamps[0]].copy()\\n\",\n    \"df_val = df.loc[(split_timestamps[0] <= df[ts_id]) & (df[ts_id] < split_timestamps[1])].copy()\\n\",\n    \"df_test = df.loc[(split_timestamps[1] <= df[ts_id])].copy()\\n\",\n    \"\\n\",\n    \"x_label, rslt_file = 'x_label', 'sports14-indexed.inter'\\n\",\n    \"df_train[x_label] = 0\\n\",\n    \"df_val[x_label] = 1\\n\",\n    \"df_test[x_label] = 2\\n\",\n    \"temp_df = pd.concat([df_train, df_val, df_test])\\n\",\n    \"temp_df = temp_df[[learner_id, course_id, 'rating', ts_id, x_label]]\\n\",\n    \"print(f'columns: {temp_df.columns}')\\n\",\n    \"\\n\",\n    \"temp_df.columns = [learner_id, course_id, 'rating', ts_id, x_label]\\n\",\n    \"\\n\",\n    \"temp_df.to_csv(os.path.join(rslt_dir, rslt_file), sep='\\\\t', index=False)\\n\",\n    \"temp_df[:5]\\n\",\n    \"#print('done!')\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n 
 },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"## Reload\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%% md\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 11,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"shape: (296337, 5)\\n\"\n     ]\n    },\n    {\n     \"data\": {\n      \"text/plain\": \"   userID  itemID  rating   timestamp  x_label\\n0       1       0     5.0  1328140800        0\\n1       2       0     4.0  1330387200        0\\n2       3       0     4.0  1328400000        0\\n3       4       0     4.0  1366675200        0\",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>userID</th>\\n      <th>itemID</th>\\n      <th>rating</th>\\n      <th>timestamp</th>\\n      <th>x_label</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>0</th>\\n      <td>1</td>\\n      <td>0</td>\\n      <td>5.0</td>\\n      <td>1328140800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>1</th>\\n      <td>2</td>\\n      <td>0</td>\\n      <td>4.0</td>\\n      <td>1330387200</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>2</th>\\n      <td>3</td>\\n      <td>0</td>\\n      <td>4.0</td>\\n      <td>1328400000</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>3</th>\\n      <td>4</td>\\n      <td>0</td>\\n      <td>4.0</td>\\n      <td>1366675200</td>\\n      <td>0</td>\\n    </tr>\\n  
</tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 11,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"indexed_df = pd.read_csv(rslt_file, sep='\\\\t')\\n\",\n    \"print(f'shape: {indexed_df.shape}')\\n\",\n    \"indexed_df[:4]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 12,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"# of unique learners: 35598\\n\",\n      \"# of unique courses: 18357\\n\",\n      \"min/max of unique learners: 0/35597\\n\",\n      \"min/max of unique courses: 0/18356\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"u_uni = indexed_df[learner_id].unique()\\n\",\n    \"c_uni = indexed_df[course_id].unique()\\n\",\n    \"\\n\",\n    \"print(f'# of unique learners: {len(u_uni)}')\\n\",\n    \"print(f'# of unique courses: {len(c_uni)}')\\n\",\n    \"\\n\",\n    \"print('min/max of unique learners: {0}/{1}'.format(min(u_uni), max(u_uni)))\\n\",\n    \"print('min/max of unique courses: {0}/{1}'.format(min(c_uni), max(c_uni)))\\n\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 2\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython2\",\n   \"version\": \"2.7.6\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}"
  },
  {
    "path": "preprocessing/1splitting.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"# 基于rating2inter.ipynb生成的5-core交互图，Train/Validation/Test data splitting\\n\",\n    \"- Based on generated interactions, perform data splitting\\n\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 1,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import os, csv\\n\",\n    \"import pandas as pd\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"outputs\": [],\n   \"source\": [\n    \"os.chdir('/home/enoche/MMRec/Sports14')\\n\",\n    \"os.getcwd()\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\",\n     \"is_executing\": true\n    }\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"## 直接加载现成的, Load interactions\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 3,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"shape: (296337, 5)\\n\"\n     ]\n    },\n    {\n     \"data\": {\n      \"text/plain\": \"   userID  itemID  rating   timestamp  x_label\\n0       1       0     5.0  1328140800        0\\n1       2       0     4.0  1330387200        0\\n2       3       0     4.0  1328400000        0\\n3       4       0     4.0  1366675200        0\",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>userID</th>\\n      <th>itemID</th>\\n      
<th>rating</th>\\n      <th>timestamp</th>\\n      <th>x_label</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>0</th>\\n      <td>1</td>\\n      <td>0</td>\\n      <td>5.0</td>\\n      <td>1328140800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>1</th>\\n      <td>2</td>\\n      <td>0</td>\\n      <td>4.0</td>\\n      <td>1330387200</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>2</th>\\n      <td>3</td>\\n      <td>0</td>\\n      <td>4.0</td>\\n      <td>1328400000</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>3</th>\\n      <td>4</td>\\n      <td>0</td>\\n      <td>4.0</td>\\n      <td>1366675200</td>\\n      <td>0</td>\\n    </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 3,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"rslt_file = 'sports14-indexed.inter'\\n\",\n    \"df = pd.read_csv(rslt_file, sep='\\\\t')\\n\",\n    \"print(f'shape: {df.shape}')\\n\",\n    \"df[:4]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 4,\n   \"outputs\": [],\n   \"source\": [\n    \"import random\\n\",\n    \"import numpy as np\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 4,\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": \"        userID  itemID  rating   timestamp  x_label\\n154667       0   11981     2.0  1390694400        1\\n295557       0   15852     5.0  1390694400        1\\n189316       0   17787     3.0  1391990400        2\\n151302       0       0     5.0  1390694400        1\\n1820         0    3369     5.0  1405123200        2\\n60040        0   13372     5.0  1391990400        2\\n199192       0    5458     5.0  1405123200        2\\n163234     
  0    3327     3.0  1391990400        2\\n60837        1    2322     5.0  1337212800        0\\n233786       1    4123     5.0  1354838400        0\\n163460       1   14212     5.0  1368230400        0\\n206628       1    1542     4.0  1302220800        0\\n261633       1    8802     4.0  1368230400        0\\n99658        1    9198     5.0  1318377600        0\\n268935       1    7215     5.0  1285372800        0\\n77956        1   13468     5.0  1328140800        0\\n105444       1    2374     5.0  1391558400        1\\n237889       1    7169     5.0  1302220800        0\\n173295       1    6677     5.0  1318377600        0\\n50074        1   15278     5.0  1344902400        0\",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>userID</th>\\n      <th>itemID</th>\\n      <th>rating</th>\\n      <th>timestamp</th>\\n      <th>x_label</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>154667</th>\\n      <td>0</td>\\n      <td>11981</td>\\n      <td>2.0</td>\\n      <td>1390694400</td>\\n      <td>1</td>\\n    </tr>\\n    <tr>\\n      <th>295557</th>\\n      <td>0</td>\\n      <td>15852</td>\\n      <td>5.0</td>\\n      <td>1390694400</td>\\n      <td>1</td>\\n    </tr>\\n    <tr>\\n      <th>189316</th>\\n      <td>0</td>\\n      <td>17787</td>\\n      <td>3.0</td>\\n      <td>1391990400</td>\\n      <td>2</td>\\n    </tr>\\n    <tr>\\n      <th>151302</th>\\n      <td>0</td>\\n      <td>0</td>\\n      <td>5.0</td>\\n      <td>1390694400</td>\\n      <td>1</td>\\n    </tr>\\n    <tr>\\n      <th>1820</th>\\n      <td>0</td>\\n      <td>3369</td>\\n      <td>5.0</td>\\n      
<td>1405123200</td>\\n      <td>2</td>\\n    </tr>\\n    <tr>\\n      <th>60040</th>\\n      <td>0</td>\\n      <td>13372</td>\\n      <td>5.0</td>\\n      <td>1391990400</td>\\n      <td>2</td>\\n    </tr>\\n    <tr>\\n      <th>199192</th>\\n      <td>0</td>\\n      <td>5458</td>\\n      <td>5.0</td>\\n      <td>1405123200</td>\\n      <td>2</td>\\n    </tr>\\n    <tr>\\n      <th>163234</th>\\n      <td>0</td>\\n      <td>3327</td>\\n      <td>3.0</td>\\n      <td>1391990400</td>\\n      <td>2</td>\\n    </tr>\\n    <tr>\\n      <th>60837</th>\\n      <td>1</td>\\n      <td>2322</td>\\n      <td>5.0</td>\\n      <td>1337212800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>233786</th>\\n      <td>1</td>\\n      <td>4123</td>\\n      <td>5.0</td>\\n      <td>1354838400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>163460</th>\\n      <td>1</td>\\n      <td>14212</td>\\n      <td>5.0</td>\\n      <td>1368230400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>206628</th>\\n      <td>1</td>\\n      <td>1542</td>\\n      <td>4.0</td>\\n      <td>1302220800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>261633</th>\\n      <td>1</td>\\n      <td>8802</td>\\n      <td>4.0</td>\\n      <td>1368230400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>99658</th>\\n      <td>1</td>\\n      <td>9198</td>\\n      <td>5.0</td>\\n      <td>1318377600</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>268935</th>\\n      <td>1</td>\\n      <td>7215</td>\\n      <td>5.0</td>\\n      <td>1285372800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>77956</th>\\n      <td>1</td>\\n      <td>13468</td>\\n      <td>5.0</td>\\n      <td>1328140800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>105444</th>\\n      <td>1</td>\\n      <td>2374</td>\\n      <td>5.0</td>\\n      <td>1391558400</td>\\n      <td>1</td>\\n    </tr>\\n    <tr>\\n      <th>237889</th>\\n      <td>1</td>\\n      
<td>7169</td>\\n      <td>5.0</td>\\n      <td>1302220800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>173295</th>\\n      <td>1</td>\\n      <td>6677</td>\\n      <td>5.0</td>\\n      <td>1318377600</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>50074</th>\\n      <td>1</td>\\n      <td>15278</td>\\n      <td>5.0</td>\\n      <td>1344902400</td>\\n      <td>0</td>\\n    </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 4,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"\\n\",\n    \"df = df.sample(frac=1).reset_index(drop=True)\\n\",\n    \"\\n\",\n    \"df.sort_values(by=['userID'], inplace=True)\\n\",\n    \"df[:20]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"outputs\": [],\n   \"source\": [\n    \"uid_field, iid_field = 'userID', 'itemID'\\n\",\n    \"\\n\",\n    \"uid_freq = df.groupby(uid_field)[iid_field]\\n\",\n    \"u_i_dict = {}\\n\",\n    \"for u, u_ls in uid_freq:\\n\",\n    \"    u_i_dict[u] = list(u_ls)\\n\",\n    \"u_i_dict\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\",\n     \"is_executing\": true\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 6,\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": \"[0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 1,\\n 2,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 1,\\n 2,\\n 2,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 1,\\n 1,\\n 2,\\n 2,\\n 0,\\n 0,\\n 0,\\n 1,\\n 2,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 1,\\n 2,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 
0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0,\\n 0]\"\n     },\n     \"execution_count\": 6,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"new_label = []\\n\",\n    \"u_ids_sorted = sorted(u_i_dict.keys())\\n\",\n    \"\\n\",\n    \"for u in u_ids_sorted:\\n\",\n    \"    items = u_i_dict[u]\\n\",\n    \"    n_items = len(items)\\n\",\n    \"    if n_items < 10:\\n\",\n    \"        tmp_ls = [0] * (n_items - 2) + [1] + [2]\\n\",\n    \"    else:\\n\",\n    \"        val_test_len = int(n_items * 0.2)\\n\",\n    \"        train_len = n_items - val_test_len\\n\",\n    \"        val_len = val_test_len // 2\\n\",\n    \"        test_len = val_test_len - val_len\\n\",\n    \"        tmp_ls = [0] * train_len + [1] * val_len + [2] * test_len\\n\",\n    \"    new_label.extend(tmp_ls)\\n\",\n    \"\\n\",\n    \"new_label[:100]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 7,\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": \"        userID  itemID  rating   timestamp  x_label\\n154667       0   11981     2.0  1390694400        0\\n295557       0   15852     5.0  1390694400        0\\n189316       0   17787     3.0  1391990400        0\\n151302       0       0     5.0  1390694400        0\\n1820         0    3369     5.0  1405123200        0\\n60040        0   13372     5.0  1391990400        0\\n199192       0    5458     5.0  1405123200        1\\n163234       0    3327     3.0  1391990400        2\\n60837        1    2322     5.0  1337212800        0\\n233786       1    4123     5.0  1354838400        0\\n163460       1   14212     5.0  1368230400        0\\n206628       1    1542     4.0  1302220800        0\\n261633       1    8802     4.0  1368230400        0\\n99658        1    9198     5.0  1318377600        0\\n268935  
     1    7215     5.0  1285372800        0\\n77956        1   13468     5.0  1328140800        0\\n105444       1    2374     5.0  1391558400        0\\n237889       1    7169     5.0  1302220800        0\\n173295       1    6677     5.0  1318377600        0\\n50074        1   15278     5.0  1344902400        0\",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>userID</th>\\n      <th>itemID</th>\\n      <th>rating</th>\\n      <th>timestamp</th>\\n      <th>x_label</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>154667</th>\\n      <td>0</td>\\n      <td>11981</td>\\n      <td>2.0</td>\\n      <td>1390694400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>295557</th>\\n      <td>0</td>\\n      <td>15852</td>\\n      <td>5.0</td>\\n      <td>1390694400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>189316</th>\\n      <td>0</td>\\n      <td>17787</td>\\n      <td>3.0</td>\\n      <td>1391990400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>151302</th>\\n      <td>0</td>\\n      <td>0</td>\\n      <td>5.0</td>\\n      <td>1390694400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>1820</th>\\n      <td>0</td>\\n      <td>3369</td>\\n      <td>5.0</td>\\n      <td>1405123200</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>60040</th>\\n      <td>0</td>\\n      <td>13372</td>\\n      <td>5.0</td>\\n      <td>1391990400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>199192</th>\\n      <td>0</td>\\n      <td>5458</td>\\n      <td>5.0</td>\\n      <td>1405123200</td>\\n      <td>1</td>\\n    </tr>\\n    <tr>\\n      
<th>163234</th>\\n      <td>0</td>\\n      <td>3327</td>\\n      <td>3.0</td>\\n      <td>1391990400</td>\\n      <td>2</td>\\n    </tr>\\n    <tr>\\n      <th>60837</th>\\n      <td>1</td>\\n      <td>2322</td>\\n      <td>5.0</td>\\n      <td>1337212800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>233786</th>\\n      <td>1</td>\\n      <td>4123</td>\\n      <td>5.0</td>\\n      <td>1354838400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>163460</th>\\n      <td>1</td>\\n      <td>14212</td>\\n      <td>5.0</td>\\n      <td>1368230400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>206628</th>\\n      <td>1</td>\\n      <td>1542</td>\\n      <td>4.0</td>\\n      <td>1302220800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>261633</th>\\n      <td>1</td>\\n      <td>8802</td>\\n      <td>4.0</td>\\n      <td>1368230400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>99658</th>\\n      <td>1</td>\\n      <td>9198</td>\\n      <td>5.0</td>\\n      <td>1318377600</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>268935</th>\\n      <td>1</td>\\n      <td>7215</td>\\n      <td>5.0</td>\\n      <td>1285372800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>77956</th>\\n      <td>1</td>\\n      <td>13468</td>\\n      <td>5.0</td>\\n      <td>1328140800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>105444</th>\\n      <td>1</td>\\n      <td>2374</td>\\n      <td>5.0</td>\\n      <td>1391558400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>237889</th>\\n      <td>1</td>\\n      <td>7169</td>\\n      <td>5.0</td>\\n      <td>1302220800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>173295</th>\\n      <td>1</td>\\n      <td>6677</td>\\n      <td>5.0</td>\\n      <td>1318377600</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>50074</th>\\n      <td>1</td>\\n      <td>15278</td>\\n      <td>5.0</td>\\n      <td>1344902400</td>\\n      <td>0</td>\\n  
  </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 7,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"df['x_label'] = new_label\\n\",\n    \"df[:20]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 12,\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": \"'beauty14-indexed'\"\n     },\n     \"execution_count\": 12,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"rslt_file[:-6]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 8,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"done!!!\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"new_labeled_file = rslt_file[:-6] + '-v4.inter'\\n\",\n    \"df.to_csv(os.path.join('./', new_labeled_file), sep='\\\\t', index=False)\\n\",\n    \"print('done!!!')\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"## Reload\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%% md\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 9,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"shape: (296337, 5)\\n\"\n     ]\n    },\n    {\n     \"data\": {\n      
\"text/plain\": \"    userID  itemID  rating   timestamp  x_label\\n0        0   11981     2.0  1390694400        0\\n1        0   15852     5.0  1390694400        0\\n2        0   17787     3.0  1391990400        0\\n3        0       0     5.0  1390694400        0\\n4        0    3369     5.0  1405123200        0\\n5        0   13372     5.0  1391990400        0\\n6        0    5458     5.0  1405123200        1\\n7        0    3327     3.0  1391990400        2\\n8        1    2322     5.0  1337212800        0\\n9        1    4123     5.0  1354838400        0\\n10       1   14212     5.0  1368230400        0\\n11       1    1542     4.0  1302220800        0\\n12       1    8802     4.0  1368230400        0\\n13       1    9198     5.0  1318377600        0\\n14       1    7215     5.0  1285372800        0\\n15       1   13468     5.0  1328140800        0\\n16       1    2374     5.0  1391558400        0\\n17       1    7169     5.0  1302220800        0\\n18       1    6677     5.0  1318377600        0\\n19       1   15278     5.0  1344902400        0\",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>userID</th>\\n      <th>itemID</th>\\n      <th>rating</th>\\n      <th>timestamp</th>\\n      <th>x_label</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>0</th>\\n      <td>0</td>\\n      <td>11981</td>\\n      <td>2.0</td>\\n      <td>1390694400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>1</th>\\n      <td>0</td>\\n      <td>15852</td>\\n      <td>5.0</td>\\n      <td>1390694400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>2</th>\\n      <td>0</td>\\n     
 <td>17787</td>\\n      <td>3.0</td>\\n      <td>1391990400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>3</th>\\n      <td>0</td>\\n      <td>0</td>\\n      <td>5.0</td>\\n      <td>1390694400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>4</th>\\n      <td>0</td>\\n      <td>3369</td>\\n      <td>5.0</td>\\n      <td>1405123200</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>5</th>\\n      <td>0</td>\\n      <td>13372</td>\\n      <td>5.0</td>\\n      <td>1391990400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>6</th>\\n      <td>0</td>\\n      <td>5458</td>\\n      <td>5.0</td>\\n      <td>1405123200</td>\\n      <td>1</td>\\n    </tr>\\n    <tr>\\n      <th>7</th>\\n      <td>0</td>\\n      <td>3327</td>\\n      <td>3.0</td>\\n      <td>1391990400</td>\\n      <td>2</td>\\n    </tr>\\n    <tr>\\n      <th>8</th>\\n      <td>1</td>\\n      <td>2322</td>\\n      <td>5.0</td>\\n      <td>1337212800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>9</th>\\n      <td>1</td>\\n      <td>4123</td>\\n      <td>5.0</td>\\n      <td>1354838400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>10</th>\\n      <td>1</td>\\n      <td>14212</td>\\n      <td>5.0</td>\\n      <td>1368230400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>11</th>\\n      <td>1</td>\\n      <td>1542</td>\\n      <td>4.0</td>\\n      <td>1302220800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>12</th>\\n      <td>1</td>\\n      <td>8802</td>\\n      <td>4.0</td>\\n      <td>1368230400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>13</th>\\n      <td>1</td>\\n      <td>9198</td>\\n      <td>5.0</td>\\n      <td>1318377600</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>14</th>\\n      <td>1</td>\\n      <td>7215</td>\\n      <td>5.0</td>\\n      <td>1285372800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>15</th>\\n      <td>1</td>\\n      <td>13468</td>\\n      
<td>5.0</td>\\n      <td>1328140800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>16</th>\\n      <td>1</td>\\n      <td>2374</td>\\n      <td>5.0</td>\\n      <td>1391558400</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>17</th>\\n      <td>1</td>\\n      <td>7169</td>\\n      <td>5.0</td>\\n      <td>1302220800</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>18</th>\\n      <td>1</td>\\n      <td>6677</td>\\n      <td>5.0</td>\\n      <td>1318377600</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>19</th>\\n      <td>1</td>\\n      <td>15278</td>\\n      <td>5.0</td>\\n      <td>1344902400</td>\\n      <td>0</td>\\n    </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 9,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"indexed_df = pd.read_csv(new_labeled_file, sep='\\\\t')\\n\",\n    \"print(f'shape: {indexed_df.shape}')\\n\",\n    \"indexed_df[:20]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 10,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"# of unique learners: 35598\\n\",\n      \"# of unique courses: 18357\\n\",\n      \"min/max of unique learners: 0/35597\\n\",\n      \"min/max of unique courses: 0/18356\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"u_id_str, i_id_str = 'userID', 'itemID'\\n\",\n    \"u_uni = indexed_df[u_id_str].unique()\\n\",\n    \"c_uni = indexed_df[i_id_str].unique()\\n\",\n    \"\\n\",\n    \"print(f'# of unique learners: {len(u_uni)}')\\n\",\n    \"print(f'# of unique courses: {len(c_uni)}')\\n\",\n    \"\\n\",\n    \"print('min/max of unique learners: {0}/{1}'.format(min(u_uni), max(u_uni)))\\n\",\n    \"print('min/max of unique courses: {0}/{1}'.format(min(c_uni), max(c_uni)))\\n\"\n   ],\n   
\"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 2\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython2\",\n   \"version\": \"2.7.6\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}"
  },
  {
    "path": "preprocessing/2reindex-feat.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"# 利用rating2inter.ipynb中U/I的index对features进行一一对应(meta-text)\\n\",\n    \"- Reindex item feature ID with IDs generated in 0rating2inter.ipynb\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 1,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import os\\n\",\n    \"import pandas as pd\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 2,\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": \"'/home/xin/XMMRec/Sports14'\"\n     },\n     \"execution_count\": 2,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"os.chdir('/home/xin/MMRec/Sports14')\\n\",\n    \"os.getcwd()\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 3,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"shape: (18357, 2)\\n\"\n     ]\n    },\n    {\n     \"data\": {\n      \"text/plain\": \"         asin  itemID\\n0  1881509818       0\\n1  2094869245       1\\n2  7245456259       2\\n3  7245456313       3\",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>asin</th>\\n      <th>itemID</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>0</th>\\n      <td>1881509818</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      
<th>1</th>\\n      <td>2094869245</td>\\n      <td>1</td>\\n    </tr>\\n    <tr>\\n      <th>2</th>\\n      <td>7245456259</td>\\n      <td>2</td>\\n    </tr>\\n    <tr>\\n      <th>3</th>\\n      <td>7245456313</td>\\n      <td>3</td>\\n    </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 3,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"# load item mapping\\n\",\n    \"i_id_mapping = 'i_id_mapping.csv'\\n\",\n    \"df = pd.read_csv(i_id_mapping, sep='\\\\t')\\n\",\n    \"print(f'shape: {df.shape}')\\n\",\n    \"df[:4]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 4,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"0 Extracting U-I interactions.\\n\",\n      \"Total records: (532197, 9)\\n\"\n     ]\n    },\n    {\n     \"data\": {\n      \"text/plain\": \"         asin                           title  price  \\\\\\n0  0000032069  Adult Ballet Tutu Cheetah Pink   7.89   \\n1  0000031909     Girls Ballet Tutu Neon Pink   7.00   \\n2  0000032034        Adult Ballet Tutu Yellow   7.87   \\n\\n                                               imUrl  \\\\\\n0  http://ecx.images-amazon.com/images/I/51EzU6qu...   \\n1  http://ecx.images-amazon.com/images/I/41xBoP0F...   \\n2  http://ecx.images-amazon.com/images/I/21GNUNIa...   \\n\\n                                             related     brand  \\\\\\n0  {'also_bought': ['0000032050', 'B00D0DJAEG', '...  BubuBibi   \\n1  {'also_bought': ['B002BZX8Z6', 'B00JHONN1S', '...   Unknown   \\n2  {'also_bought': ['B00D2JSRFQ', '0000032042', '...  BubuBibi   \\n\\n                                          categories  \\\\\\n0  [[Sports & Outdoors, Other Sports, Dance, Clot...   
\\n1         [[Sports & Outdoors, Other Sports, Dance]]   \\n2  [[Sports & Outdoors, Other Sports, Dance, Clot...   \\n\\n                  salesRank                                        description  \\n0                       NaN                                                NaN  \\n1  {'Toys & Games': 201847}  High quality 3 layer ballet tutu. 12 inches in...  \\n2                       NaN                                                NaN  \",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>asin</th>\\n      <th>title</th>\\n      <th>price</th>\\n      <th>imUrl</th>\\n      <th>related</th>\\n      <th>brand</th>\\n      <th>categories</th>\\n      <th>salesRank</th>\\n      <th>description</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>0</th>\\n      <td>0000032069</td>\\n      <td>Adult Ballet Tutu Cheetah Pink</td>\\n      <td>7.89</td>\\n      <td>http://ecx.images-amazon.com/images/I/51EzU6qu...</td>\\n      <td>{'also_bought': ['0000032050', 'B00D0DJAEG', '...</td>\\n      <td>BubuBibi</td>\\n      <td>[[Sports &amp; Outdoors, Other Sports, Dance, Clot...</td>\\n      <td>NaN</td>\\n      <td>NaN</td>\\n    </tr>\\n    <tr>\\n      <th>1</th>\\n      <td>0000031909</td>\\n      <td>Girls Ballet Tutu Neon Pink</td>\\n      <td>7.00</td>\\n      <td>http://ecx.images-amazon.com/images/I/41xBoP0F...</td>\\n      <td>{'also_bought': ['B002BZX8Z6', 'B00JHONN1S', '...</td>\\n      <td>Unknown</td>\\n      <td>[[Sports &amp; Outdoors, Other Sports, Dance]]</td>\\n      <td>{'Toys &amp; Games': 201847}</td>\\n      <td>High quality 3 layer ballet tutu. 
12 inches in...</td>\\n    </tr>\\n    <tr>\\n      <th>2</th>\\n      <td>0000032034</td>\\n      <td>Adult Ballet Tutu Yellow</td>\\n      <td>7.87</td>\\n      <td>http://ecx.images-amazon.com/images/I/21GNUNIa...</td>\\n      <td>{'also_bought': ['B00D2JSRFQ', '0000032042', '...</td>\\n      <td>BubuBibi</td>\\n      <td>[[Sports &amp; Outdoors, Other Sports, Dance, Clot...</td>\\n      <td>NaN</td>\\n      <td>NaN</td>\\n    </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 4,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"\\n\",\n    \"import gzip, json\\n\",\n    \"meta_file = 'meta_Sports_and_Outdoors.json.gz'\\n\",\n    \"\\n\",\n    \"print('0 Extracting U-I interactions.')\\n\",\n    \"\\n\",\n    \"def parse(path):\\n\",\n    \"  g = gzip.open(path, 'rb')\\n\",\n    \"  for l in g:\\n\",\n    \"    yield eval(l)\\n\",\n    \"\\n\",\n    \"def getDF(path):\\n\",\n    \"  i = 0\\n\",\n    \"  df = {}\\n\",\n    \"  for d in parse(path):\\n\",\n    \"    df[i] = d\\n\",\n    \"    i += 1\\n\",\n    \"  return pd.DataFrame.from_dict(df, orient='index')\\n\",\n    \"\\n\",\n    \"meta_df = getDF(meta_file)\\n\",\n    \"\\n\",\n    \"print(f'Total records: {meta_df.shape}')\\n\",\n    \"meta_df[:3]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 5,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"shape: (18357, 10)\\n\"\n     ]\n    },\n    {\n     \"data\": {\n      \"text/plain\": \"           asin                                              title  price  \\\\\\n132  1881509818           Ghost Inc Glock Armorers Tool 3/32 Punch   9.99   \\n155  2094869245  5 LED Bicycle Rear Tail Red Bike Torch Laser B...   
8.26   \\n\\n                                                 imUrl  \\\\\\n132  http://ecx.images-amazon.com/images/I/21iMxsyD...   \\n155  http://ecx.images-amazon.com/images/I/51RtwnJw...   \\n\\n                                               related  brand  \\\\\\n132  {'also_bought': ['B000U3YWEM', 'B000U401J6', '...  Ghost   \\n155  {'also_bought': ['B0081O93N2', 'B00EYTCHJA', '...          \\n\\n                                            categories  \\\\\\n132  [[Sports & Outdoors, Hunting & Fishing, Huntin...   \\n155  [[Sports & Outdoors, Cycling, Lights & Reflect...   \\n\\n                             salesRank  \\\\\\n132  {'Sports &amp; Outdoors': 172909}   \\n155   {'Sports &amp; Outdoors': 14293}   \\n\\n                                           description  itemID  \\n132  Ghost Armorer Tool (1). The GAT is made with a...       0  \\n155  This newly-designed Laser tail light can emit ...       1  \",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>asin</th>\\n      <th>title</th>\\n      <th>price</th>\\n      <th>imUrl</th>\\n      <th>related</th>\\n      <th>brand</th>\\n      <th>categories</th>\\n      <th>salesRank</th>\\n      <th>description</th>\\n      <th>itemID</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>132</th>\\n      <td>1881509818</td>\\n      <td>Ghost Inc Glock Armorers Tool 3/32 Punch</td>\\n      <td>9.99</td>\\n      <td>http://ecx.images-amazon.com/images/I/21iMxsyD...</td>\\n      <td>{'also_bought': ['B000U3YWEM', 'B000U401J6', '...</td>\\n      <td>Ghost</td>\\n      <td>[[Sports &amp; Outdoors, Hunting &amp; Fishing, 
Huntin...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 172909}</td>\\n      <td>Ghost Armorer Tool (1). The GAT is made with a...</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>155</th>\\n      <td>2094869245</td>\\n      <td>5 LED Bicycle Rear Tail Red Bike Torch Laser B...</td>\\n      <td>8.26</td>\\n      <td>http://ecx.images-amazon.com/images/I/51RtwnJw...</td>\\n      <td>{'also_bought': ['B0081O93N2', 'B00EYTCHJA', '...</td>\\n      <td></td>\\n      <td>[[Sports &amp; Outdoors, Cycling, Lights &amp; Reflect...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 14293}</td>\\n      <td>This newly-designed Laser tail light can emit ...</td>\\n      <td>1</td>\\n    </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 5,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"# remapping\\n\",\n    \"map_dict = dict(zip(df['asin'], df['itemID']))\\n\",\n    \"\\n\",\n    \"meta_df['itemID'] = meta_df['asin'].map(map_dict)\\n\",\n    \"meta_df.dropna(subset=['itemID'], inplace=True)\\n\",\n    \"meta_df['itemID'] = meta_df['itemID'].astype('int64')\\n\",\n    \"#meta_df['description'] = meta_df['description'].fillna(\\\" \\\")\\n\",\n    \"meta_df.sort_values(by=['itemID'], inplace=True)\\n\",\n    \"\\n\",\n    \"print(f'shape: {meta_df.shape}')\\n\",\n    \"meta_df[:2]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 6,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"new column names: ['itemID', 'asin', 'title', 'price', 'imUrl', 'related', 'brand', 'categories', 'salesRank', 'description']\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"ori_cols = meta_df.columns.tolist()\\n\",\n    \"\\n\",\n    \"ret_cols = [ori_cols[-1]] + ori_cols[:-1]\\n\",\n    \"print(f'new column 
names: {ret_cols}')\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 7,\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": \"           asin                                              title  price  \\\\\\n132  1881509818           Ghost Inc Glock Armorers Tool 3/32 Punch   9.99   \\n155  2094869245  5 LED Bicycle Rear Tail Red Bike Torch Laser B...   8.26   \\n201  7245456259  Black Mountain Products Single Resistance Band...  10.49   \\n\\n                                                 imUrl  \\\\\\n132  http://ecx.images-amazon.com/images/I/21iMxsyD...   \\n155  http://ecx.images-amazon.com/images/I/51RtwnJw...   \\n201  http://ecx.images-amazon.com/images/I/411Ikpf1...   \\n\\n                                               related           brand  \\\\\\n132  {'also_bought': ['B000U3YWEM', 'B000U401J6', '...           Ghost   \\n155  {'also_bought': ['B0081O93N2', 'B00EYTCHJA', '...                   \\n201  {'also_bought': ['B00DDBS2JE', 'B00H1KNHE8', '...  Black Mountain   \\n\\n                                            categories  \\\\\\n132  [[Sports & Outdoors, Hunting & Fishing, Huntin...   \\n155  [[Sports & Outdoors, Cycling, Lights & Reflect...   \\n201  [[Sports & Outdoors, Exercise & Fitness, Acces...   \\n\\n                             salesRank  \\\\\\n132  {'Sports &amp; Outdoors': 172909}   \\n155   {'Sports &amp; Outdoors': 14293}   \\n201    {'Sports &amp; Outdoors': 1010}   \\n\\n                                           description  itemID  \\n132  Ghost Armorer Tool (1). The GAT is made with a...       0  \\n155  This newly-designed Laser tail light can emit ...       1  \\n201  Black Mountain Products single resistance band...       
2  \",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>asin</th>\\n      <th>title</th>\\n      <th>price</th>\\n      <th>imUrl</th>\\n      <th>related</th>\\n      <th>brand</th>\\n      <th>categories</th>\\n      <th>salesRank</th>\\n      <th>description</th>\\n      <th>itemID</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>132</th>\\n      <td>1881509818</td>\\n      <td>Ghost Inc Glock Armorers Tool 3/32 Punch</td>\\n      <td>9.99</td>\\n      <td>http://ecx.images-amazon.com/images/I/21iMxsyD...</td>\\n      <td>{'also_bought': ['B000U3YWEM', 'B000U401J6', '...</td>\\n      <td>Ghost</td>\\n      <td>[[Sports &amp; Outdoors, Hunting &amp; Fishing, Huntin...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 172909}</td>\\n      <td>Ghost Armorer Tool (1). 
The GAT is made with a...</td>\\n      <td>0</td>\\n    </tr>\\n    <tr>\\n      <th>155</th>\\n      <td>2094869245</td>\\n      <td>5 LED Bicycle Rear Tail Red Bike Torch Laser B...</td>\\n      <td>8.26</td>\\n      <td>http://ecx.images-amazon.com/images/I/51RtwnJw...</td>\\n      <td>{'also_bought': ['B0081O93N2', 'B00EYTCHJA', '...</td>\\n      <td></td>\\n      <td>[[Sports &amp; Outdoors, Cycling, Lights &amp; Reflect...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 14293}</td>\\n      <td>This newly-designed Laser tail light can emit ...</td>\\n      <td>1</td>\\n    </tr>\\n    <tr>\\n      <th>201</th>\\n      <td>7245456259</td>\\n      <td>Black Mountain Products Single Resistance Band...</td>\\n      <td>10.49</td>\\n      <td>http://ecx.images-amazon.com/images/I/411Ikpf1...</td>\\n      <td>{'also_bought': ['B00DDBS2JE', 'B00H1KNHE8', '...</td>\\n      <td>Black Mountain</td>\\n      <td>[[Sports &amp; Outdoors, Exercise &amp; Fitness, Acces...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 1010}</td>\\n      <td>Black Mountain Products single resistance band...</td>\\n      <td>2</td>\\n    </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 7,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"meta_df[:3]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 8,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"done!\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"ret_df = meta_df[ret_cols]\\n\",\n    \"# dump\\n\",\n    \"ret_df.to_csv(os.path.join('./', 'meta-sports14.csv'), index=False)\\n\",\n    \"print('done!')\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n  
 \"source\": [],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"## Reload\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 9,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"shape: (18357, 10)\\n\"\n     ]\n    },\n    {\n     \"data\": {\n      \"text/plain\": \"   itemID        asin                                              title  \\\\\\n0       0  1881509818           Ghost Inc Glock Armorers Tool 3/32 Punch   \\n1       1  2094869245  5 LED Bicycle Rear Tail Red Bike Torch Laser B...   \\n2       2  7245456259  Black Mountain Products Single Resistance Band...   \\n3       3  7245456313  Black Mountain Products Resistance Band Set wi...   \\n\\n   price                                              imUrl  \\\\\\n0   9.99  http://ecx.images-amazon.com/images/I/21iMxsyD...   \\n1   8.26  http://ecx.images-amazon.com/images/I/51RtwnJw...   \\n2  10.49  http://ecx.images-amazon.com/images/I/411Ikpf1...   \\n3  32.99  http://ecx.images-amazon.com/images/I/51FdHlZS...   \\n\\n                                             related           brand  \\\\\\n0  {'also_bought': ['B000U3YWEM', 'B000U401J6', '...           Ghost   \\n1  {'also_bought': ['B0081O93N2', 'B00EYTCHJA', '...             NaN   \\n2  {'also_bought': ['B00DDBS2JE', 'B00H1KNHE8', '...  Black Mountain   \\n3  {'also_bought': ['1612431712', 'B00GSBMW2Y', '...  Black Mountain   \\n\\n                                          categories  \\\\\\n0  [['Sports & Outdoors', 'Hunting & Fishing', 'H...   \\n1  [['Sports & Outdoors', 'Cycling', 'Lights & Re...   \\n2  [['Sports & Outdoors', 'Exercise & Fitness', '...   \\n3  [['Sports & Outdoors', 'Exercise & Fitness', '...   
\\n\\n                           salesRank  \\\\\\n0  {'Sports &amp; Outdoors': 172909}   \\n1   {'Sports &amp; Outdoors': 14293}   \\n2    {'Sports &amp; Outdoors': 1010}   \\n3      {'Sports &amp; Outdoors': 15}   \\n\\n                                         description  \\n0  Ghost Armorer Tool (1). The GAT is made with a...  \\n1  This newly-designed Laser tail light can emit ...  \\n2  Black Mountain Products single resistance band...  \\n3  [if gte mso 9]><xml> <o:OfficeDocumentSettings...  \",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>itemID</th>\\n      <th>asin</th>\\n      <th>title</th>\\n      <th>price</th>\\n      <th>imUrl</th>\\n      <th>related</th>\\n      <th>brand</th>\\n      <th>categories</th>\\n      <th>salesRank</th>\\n      <th>description</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>0</th>\\n      <td>0</td>\\n      <td>1881509818</td>\\n      <td>Ghost Inc Glock Armorers Tool 3/32 Punch</td>\\n      <td>9.99</td>\\n      <td>http://ecx.images-amazon.com/images/I/21iMxsyD...</td>\\n      <td>{'also_bought': ['B000U3YWEM', 'B000U401J6', '...</td>\\n      <td>Ghost</td>\\n      <td>[['Sports &amp; Outdoors', 'Hunting &amp; Fishing', 'H...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 172909}</td>\\n      <td>Ghost Armorer Tool (1). 
The GAT is made with a...</td>\\n    </tr>\\n    <tr>\\n      <th>1</th>\\n      <td>1</td>\\n      <td>2094869245</td>\\n      <td>5 LED Bicycle Rear Tail Red Bike Torch Laser B...</td>\\n      <td>8.26</td>\\n      <td>http://ecx.images-amazon.com/images/I/51RtwnJw...</td>\\n      <td>{'also_bought': ['B0081O93N2', 'B00EYTCHJA', '...</td>\\n      <td>NaN</td>\\n      <td>[['Sports &amp; Outdoors', 'Cycling', 'Lights &amp; Re...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 14293}</td>\\n      <td>This newly-designed Laser tail light can emit ...</td>\\n    </tr>\\n    <tr>\\n      <th>2</th>\\n      <td>2</td>\\n      <td>7245456259</td>\\n      <td>Black Mountain Products Single Resistance Band...</td>\\n      <td>10.49</td>\\n      <td>http://ecx.images-amazon.com/images/I/411Ikpf1...</td>\\n      <td>{'also_bought': ['B00DDBS2JE', 'B00H1KNHE8', '...</td>\\n      <td>Black Mountain</td>\\n      <td>[['Sports &amp; Outdoors', 'Exercise &amp; Fitness', '...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 1010}</td>\\n      <td>Black Mountain Products single resistance band...</td>\\n    </tr>\\n    <tr>\\n      <th>3</th>\\n      <td>3</td>\\n      <td>7245456313</td>\\n      <td>Black Mountain Products Resistance Band Set wi...</td>\\n      <td>32.99</td>\\n      <td>http://ecx.images-amazon.com/images/I/51FdHlZS...</td>\\n      <td>{'also_bought': ['1612431712', 'B00GSBMW2Y', '...</td>\\n      <td>Black Mountain</td>\\n      <td>[['Sports &amp; Outdoors', 'Exercise &amp; Fitness', '...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 15}</td>\\n      <td>[if gte mso 9]&gt;&lt;xml&gt; &lt;o:OfficeDocumentSettings...</td>\\n    </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 9,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"indexed_df = pd.read_csv('meta-sports14.csv')\\n\",\n    \"print(f'shape: {indexed_df.shape}')\\n\",\n    \"indexed_df[:4]\"\n   ],\n   \"metadata\": {\n    
\"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 10,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"# of unique items: 18357\\n\",\n      \"min/max of unique learners: 0/18356\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"## Reload\\n\",\n    \"\\n\",\n    \"i_uni = indexed_df['itemID'].unique()\\n\",\n    \"\\n\",\n    \"print(f'# of unique items: {len(i_uni)}')\\n\",\n    \"\\n\",\n    \"print('min/max of unique learners: {0}/{1}'.format(min(i_uni), max(i_uni)))\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 2\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython2\",\n   \"version\": \"2.7.6\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}"
  },
  {
    "path": "preprocessing/3feat-encoder.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"# Sports14 Text/Image Feature Extraction\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 1,\n   \"outputs\": [],\n   \"source\": [\n    \"\\n\",\n    \"import os\\n\",\n    \"import numpy as np\\n\",\n    \"import pandas as pd\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"outputs\": [],\n   \"source\": [\n    \"os.chdir('/home/xin/MMRec/Sports14')\\n\",\n    \"os.getcwd()\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\",\n     \"is_executing\": true\n    }\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"## Load text data\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 3,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"data loaded!\\n\",\n      \"shape: (18357, 10)\\n\"\n     ]\n    },\n    {\n     \"data\": {\n      \"text/plain\": \"   itemID        asin                                              title  \\\\\\n0       0  1881509818           Ghost Inc Glock Armorers Tool 3/32 Punch   \\n1       1  2094869245  5 LED Bicycle Rear Tail Red Bike Torch Laser B...   \\n2       2  7245456259  Black Mountain Products Single Resistance Band...   \\n\\n   price                                              imUrl  \\\\\\n0   9.99  http://ecx.images-amazon.com/images/I/21iMxsyD...   \\n1   8.26  http://ecx.images-amazon.com/images/I/51RtwnJw...   \\n2  10.49  http://ecx.images-amazon.com/images/I/411Ikpf1...   
\\n\\n                                             related           brand  \\\\\\n0  {'also_bought': ['B000U3YWEM', 'B000U401J6', '...           Ghost   \\n1  {'also_bought': ['B0081O93N2', 'B00EYTCHJA', '...             NaN   \\n2  {'also_bought': ['B00DDBS2JE', 'B00H1KNHE8', '...  Black Mountain   \\n\\n                                          categories  \\\\\\n0  [['Sports & Outdoors', 'Hunting & Fishing', 'H...   \\n1  [['Sports & Outdoors', 'Cycling', 'Lights & Re...   \\n2  [['Sports & Outdoors', 'Exercise & Fitness', '...   \\n\\n                           salesRank  \\\\\\n0  {'Sports &amp; Outdoors': 172909}   \\n1   {'Sports &amp; Outdoors': 14293}   \\n2    {'Sports &amp; Outdoors': 1010}   \\n\\n                                         description  \\n0  Ghost Armorer Tool (1). The GAT is made with a...  \\n1  This newly-designed Laser tail light can emit ...  \\n2  Black Mountain Products single resistance band...  \",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>itemID</th>\\n      <th>asin</th>\\n      <th>title</th>\\n      <th>price</th>\\n      <th>imUrl</th>\\n      <th>related</th>\\n      <th>brand</th>\\n      <th>categories</th>\\n      <th>salesRank</th>\\n      <th>description</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>0</th>\\n      <td>0</td>\\n      <td>1881509818</td>\\n      <td>Ghost Inc Glock Armorers Tool 3/32 Punch</td>\\n      <td>9.99</td>\\n      <td>http://ecx.images-amazon.com/images/I/21iMxsyD...</td>\\n      <td>{'also_bought': ['B000U3YWEM', 'B000U401J6', '...</td>\\n      <td>Ghost</td>\\n      <td>[['Sports &amp; Outdoors', 
'Hunting &amp; Fishing', 'H...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 172909}</td>\\n      <td>Ghost Armorer Tool (1). The GAT is made with a...</td>\\n    </tr>\\n    <tr>\\n      <th>1</th>\\n      <td>1</td>\\n      <td>2094869245</td>\\n      <td>5 LED Bicycle Rear Tail Red Bike Torch Laser B...</td>\\n      <td>8.26</td>\\n      <td>http://ecx.images-amazon.com/images/I/51RtwnJw...</td>\\n      <td>{'also_bought': ['B0081O93N2', 'B00EYTCHJA', '...</td>\\n      <td>NaN</td>\\n      <td>[['Sports &amp; Outdoors', 'Cycling', 'Lights &amp; Re...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 14293}</td>\\n      <td>This newly-designed Laser tail light can emit ...</td>\\n    </tr>\\n    <tr>\\n      <th>2</th>\\n      <td>2</td>\\n      <td>7245456259</td>\\n      <td>Black Mountain Products Single Resistance Band...</td>\\n      <td>10.49</td>\\n      <td>http://ecx.images-amazon.com/images/I/411Ikpf1...</td>\\n      <td>{'also_bought': ['B00DDBS2JE', 'B00H1KNHE8', '...</td>\\n      <td>Black Mountain</td>\\n      <td>[['Sports &amp; Outdoors', 'Exercise &amp; Fitness', '...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 1010}</td>\\n      <td>Black Mountain Products single resistance band...</td>\\n    </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 3,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"i_id, desc_str = 'itemID', 'description'\\n\",\n    \"\\n\",\n    \"file_path = './'\\n\",\n    \"file_name = 'meta-sports14.csv'\\n\",\n    \"\\n\",\n    \"meta_file = os.path.join(file_path, file_name)\\n\",\n    \"\\n\",\n    \"df = pd.read_csv(meta_file)\\n\",\n    \"df.sort_values(by=[i_id], inplace=True)\\n\",\n    \"\\n\",\n    \"print('data loaded!')\\n\",\n    \"print(f'shape: {df.shape}')\\n\",\n    \"\\n\",\n    \"df[:3]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": 
\"code\",\n   \"execution_count\": 4,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"(91, 10)\\n\",\n      \"(2659, 10)\\n\",\n      \"(40, 10)\\n\",\n      \"(40, 10)\\n\",\n      \"(0, 10)\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"\\n\",\n    \"# sentences: title + brand + category + description | All have title + description\\n\",\n    \"\\n\",\n    \"title_na_df = df[df['title'].isnull()]\\n\",\n    \"print(title_na_df.shape)\\n\",\n    \"\\n\",\n    \"desc_na_df = df[df['description'].isnull()]\\n\",\n    \"print(desc_na_df.shape)\\n\",\n    \"\\n\",\n    \"na_df = df[df['description'].isnull() & df['title'].isnull()]\\n\",\n    \"print(na_df.shape)\\n\",\n    \"\\n\",\n    \"na3_df = df[df['description'].isnull() & df['title'].isnull() & df['brand'].isnull()]\\n\",\n    \"print(na3_df.shape)\\n\",\n    \"\\n\",\n    \"na4_df = df[df['description'].isnull() & df['title'].isnull() & df['brand'].isnull() & df['categories'].isnull()]\\n\",\n    \"print(na4_df.shape)\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 5,\n   \"outputs\": [],\n   \"source\": [\n    \"\\n\",\n    \"df[desc_str] = df[desc_str].fillna(\\\" \\\")\\n\",\n    \"df['title'] = df['title'].fillna(\\\" \\\")\\n\",\n    \"df['brand'] = df['brand'].fillna(\\\" \\\")\\n\",\n    \"df['categories'] = df['categories'].fillna(\\\" \\\")\\n\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 6,\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": \"['Ghost Inc Glock Armorers Tool 3/32 Punch Ghost Sports & Outdoors Hunting & Fishing Hunting Gun Maintenance Gunsmithing Tools Ghost Armorer Tool (1). The GAT is made with a spring steel punch. 
The diameter is 3/32 of an inch or 2.5mm, this is the same as the OEM tool size. The difference is you will be able to press harder without bending the shaft of this punch. Just a better tool to work on your Glock with.',\\n '5 LED Bicycle Rear Tail Red Bike Torch Laser Beam Lamp Light   Sports & Outdoors Cycling Lights & Reflectors Taillights This newly-designed Laser tail light can emit two parallel lines, to form a virtual lane together with the moving of bicycle on the road. LED flash light and  two lines not only enhance the waring effect strongly and greatly but also improve the safety of night riding.',\\n 'Black Mountain Products Single Resistance Band - Door Anchor and Starter Guide Included Black Mountain Sports & Outdoors Exercise & Fitness Accessories Exercise Bands Black Mountain Products single resistance bands are made out of the highest quality rubber to ensure maximum life and are 99 percent latex free! These bands are ideal for physical therapy, exercise, weight loss, Pilates, muscle toning, muscle strengthening, stretching, rehabilitation, and general health and fitness. B.M.P. resistance bands are great for home use, gym use, offices, and are ideal for travel. B.M.P. single resistance bands are a great alternative to conventional weights and exercise equipment. 
All Black Mountain Products Resistance bands come with a manufactures warranty.',\\n 'Black Mountain Products Resistance Band Set with Door Anchor, Ankle Strap, Exercise Chart, and Resistance Band Carrying Case Black Mountain Sports & Outdoors Exercise & Fitness Accessories Exercise Bands [if gte mso 9]><xml> <o:OfficeDocumentSettings> <o:AllowPNG  /> </o:OfficeDocumentSettings> </xml><![endif][if gte mso 9]><xml> <w:WordDocument> <w:View>Normal</w:View> <w:Zoom>0</w:Zoom> <w:TrackMoves  /> <w:TrackFormatting  /> <w:PunctuationKerning  /> <w:ValidateAgainstSchemas  /> <w:SaveIfXMLInvalid>false</w:SaveIfXMLInvalid> <w:IgnoreMixedContent>false</w:IgnoreMixedContent> <w:AlwaysShowPlaceholderText>false</w:AlwaysShowPlaceholderText> <w:DoNotPromoteQF  /> <w:LidThemeOther>EN-US</w:LidThemeOther> <w:LidThemeAsian>X-NONE</w:LidThemeAsian> <w:LidThemeComplexScript>X-NONE</w:LidThemeComplexScript> <w:Compatibility> <w:BreakWrappedTables  /> <w:SnapToGridInCell  /> <w:WrapTextWithPunct  /> <w:UseAsianBreakRules  /> <w:DontGrowAutofit  /> <w:SplitPgBreakAndParaMark  /> <w:EnableOpenTypeKerning  /> <w:DontFlipMirrorIndents  /> <w:OverrideTableStyleHps  /> </w:Compatibility> <m:mathPr> <m:mathFont m:val=\\\"Cambria Math\\\"  /> <m:brkBin m:val=\\\"before\\\"  /> <m:brkBinSub m:val=\\\"&#45;-\\\"  /> <m:smallFrac m:val=\\\"off\\\"  /> <m:dispDef  /> <m:lMargin m:val=\\\"0\\\"  /> <m:rMargin m:val=\\\"0\\\"  /> <m:defJc m:val=\\\"centerGroup\\\"  /> <m:wrapIndent m:val=\\\"1440\\\"  /> <m:intLim m:val=\\\"subSup\\\"  /> <m:naryLim m:val=\\\"undOvr\\\"  /> </m:mathPr></w:WordDocument> </xml><![endif][if gte mso 9]><xml> <w:LatentStyles DefLockedState=\\\"false\\\" DefUnhideWhenUsed=\\\"true\\\"   DefSemiHidden=\\\"true\\\" DefQFormat=\\\"false\\\" DefPriority=\\\"99\\\"   LatentStyleCount=\\\"267\\\"> <w:LsdException Locked=\\\"false\\\" Priority=\\\"0\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"Normal\\\"  /> <w:LsdException 
Locked=\\\"false\\\" Priority=\\\"9\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"heading 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"9\\\" QFormat=\\\"true\\\" Name=\\\"heading 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"9\\\" QFormat=\\\"true\\\" Name=\\\"heading 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"9\\\" QFormat=\\\"true\\\" Name=\\\"heading 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"9\\\" QFormat=\\\"true\\\" Name=\\\"heading 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"9\\\" QFormat=\\\"true\\\" Name=\\\"heading 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"9\\\" QFormat=\\\"true\\\" Name=\\\"heading 7\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"9\\\" QFormat=\\\"true\\\" Name=\\\"heading 8\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"9\\\" QFormat=\\\"true\\\" Name=\\\"heading 9\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"39\\\" Name=\\\"toc 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"39\\\" Name=\\\"toc 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"39\\\" Name=\\\"toc 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"39\\\" Name=\\\"toc 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"39\\\" Name=\\\"toc 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"39\\\" Name=\\\"toc 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"39\\\" Name=\\\"toc 7\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"39\\\" Name=\\\"toc 8\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"39\\\" Name=\\\"toc 9\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"35\\\" QFormat=\\\"true\\\" Name=\\\"caption\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"10\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"Title\\\"  /> <w:LsdException Locked=\\\"false\\\" 
Priority=\\\"1\\\" Name=\\\"Default Paragraph Font\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"11\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"Subtitle\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"22\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"Strong\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"20\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"Emphasis\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"59\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Table Grid\\\"  /> <w:LsdException Locked=\\\"false\\\" UnhideWhenUsed=\\\"false\\\" Name=\\\"Placeholder Text\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"1\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"No Spacing\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"60\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light Shading\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"61\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light List\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"62\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light Grid\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"63\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Shading 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"64\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Shading 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"65\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium List 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"66\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium List 2\\\"  /> <w:LsdException Locked=\\\"false\\\" 
Priority=\\\"67\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"68\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"69\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"70\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Dark List\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"71\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful Shading\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"72\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful List\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"73\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful Grid\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"60\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light Shading Accent 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"61\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light List Accent 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"62\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light Grid Accent 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"63\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Shading 1 Accent 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"64\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Shading 2 Accent 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"65\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium List 1 Accent 1\\\"  /> <w:LsdException Locked=\\\"false\\\" UnhideWhenUsed=\\\"false\\\" Name=\\\"Revision\\\"  /> 
<w:LsdException Locked=\\\"false\\\" Priority=\\\"34\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"List Paragraph\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"29\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"Quote\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"30\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"Intense Quote\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"66\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium List 2 Accent 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"67\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 1 Accent 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"68\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 2 Accent 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"69\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 3 Accent 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"70\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Dark List Accent 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"71\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful Shading Accent 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"72\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful List Accent 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"73\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful Grid Accent 1\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"60\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light Shading Accent 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"61\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" 
Name=\\\"Light List Accent 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"62\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light Grid Accent 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"63\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Shading 1 Accent 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"64\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Shading 2 Accent 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"65\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium List 1 Accent 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"66\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium List 2 Accent 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"67\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 1 Accent 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"68\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 2 Accent 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"69\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 3 Accent 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"70\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Dark List Accent 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"71\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful Shading Accent 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"72\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful List Accent 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"73\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful Grid Accent 2\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"60\\\" SemiHidden=\\\"false\\\"    
UnhideWhenUsed=\\\"false\\\" Name=\\\"Light Shading Accent 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"61\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light List Accent 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"62\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light Grid Accent 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"63\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Shading 1 Accent 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"64\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Shading 2 Accent 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"65\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium List 1 Accent 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"66\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium List 2 Accent 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"67\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 1 Accent 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"68\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 2 Accent 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"69\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 3 Accent 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"70\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Dark List Accent 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"71\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful Shading Accent 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"72\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful List Accent 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"73\\\" SemiHidden=\\\"false\\\"   
 UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful Grid Accent 3\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"60\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light Shading Accent 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"61\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light List Accent 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"62\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light Grid Accent 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"63\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Shading 1 Accent 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"64\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Shading 2 Accent 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"65\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium List 1 Accent 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"66\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium List 2 Accent 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"67\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 1 Accent 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"68\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 2 Accent 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"69\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 3 Accent 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"70\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Dark List Accent 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"71\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful Shading Accent 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"72\\\" SemiHidden=\\\"false\\\"  
  UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful List Accent 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"73\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful Grid Accent 4\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"60\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light Shading Accent 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"61\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light List Accent 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"62\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light Grid Accent 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"63\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Shading 1 Accent 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"64\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Shading 2 Accent 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"65\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium List 1 Accent 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"66\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium List 2 Accent 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"67\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 1 Accent 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"68\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 2 Accent 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"69\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 3 Accent 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"70\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Dark List Accent 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"71\\\" SemiHidden=\\\"false\\\"    
UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful Shading Accent 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"72\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful List Accent 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"73\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful Grid Accent 5\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"60\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light Shading Accent 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"61\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light List Accent 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"62\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Light Grid Accent 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"63\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Shading 1 Accent 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"64\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Shading 2 Accent 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"65\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium List 1 Accent 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"66\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium List 2 Accent 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"67\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 1 Accent 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"68\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 2 Accent 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"69\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Medium Grid 3 Accent 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"70\\\" 
SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Dark List Accent 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"71\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful Shading Accent 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"72\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful List Accent 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"73\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" Name=\\\"Colorful Grid Accent 6\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"19\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"Subtle Emphasis\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"21\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"Intense Emphasis\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"31\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"Subtle Reference\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"32\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"Intense Reference\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"33\\\" SemiHidden=\\\"false\\\"    UnhideWhenUsed=\\\"false\\\" QFormat=\\\"true\\\" Name=\\\"Book Title\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"37\\\" Name=\\\"Bibliography\\\"  /> <w:LsdException Locked=\\\"false\\\" Priority=\\\"39\\\" QFormat=\\\"true\\\" Name=\\\"TOC Heading\\\"  /> </w:LatentStyles> </xml><![endif][if gte mso 10]> <style>  /* Style Definitions */  table.MsoNormalTable {mso-style-name:\\\"Table Normal\\\"; mso-tstyle-rowband-size:0; mso-tstyle-colband-size:0; mso-style-noshow:yes; mso-style-priority:99; mso-style-parent:\\\"\\\"; mso-padding-alt:0in 5.4pt 0in 5.4pt; mso-para-margin-top:0in; mso-para-margin-right:0in; mso-para-margin-bottom:10.0pt; 
mso-para-margin-left:0in; line-height:115%; mso-pagination:widow-orphan; font-size:11.0pt; font-family:\\\"Calibri\\\",\\\"sans-serif\\\"; mso-ascii-font-family:Calibri; mso-ascii-theme-font:minor-latin; mso-hansi-font-family:Calibri; mso-hansi-theme-font:minor-latin; mso-bidi-font-family:\\\"Times New Roman\\\"; mso-bidi-theme-font:minor-bidi;} </style> <![endif]Resistance bands, also known as exercise bands, fitness tubes, and resistance cords, are a great way to add variety to your strength training routine. Black Mountain Products resistance bands are made from high quality, natural latex material to ensure maximum life. Each resistance band set comes with a soft cushioned foam handles for comfort during exercise. Resistance bands are an excellent alternative to free weights, workout machines, and traditional body weight exercises. Training with Black Mountain Products resistance bands may assist with rehabilitative exercises, fat loss, building muscle and are ideal for travel.What\\\\'s in the box?This set includes:Cushioned foam handlesDoor anchorCarrying bagAnkle strapStarter guideFive bands of varying resistance:Yellow = 2-to-4 poundsBlue = 4-to-6 poundsGreen = 10-to-12 poundsBlack = 15-to-20 poundsRed = 25-to-30 poundsBenefits of Working with Resistance BandsIn addition to being bulky and immobile, free weights are often limited in the number of exercises you can perform. Resistance bands, on the other hand, offer a surprisingly varied number of training exercises by simply changing your bodily position in order to change the tension on your muscles. 
Bands take up little space, are mobile, and easy to use on any fitness level.WarrantyThis Black Mountain Products item includes a limited 90-day manufacturer\\\\'s warranty against defects in materials and workmanship.About Black Mountain ProductsBlack Mountain Products manufactures and distributes high-quality home exercise equipment, with a complete line of resistance bands and doorway chin-up bars designed to deliver results in the comfort and convenience of your home, particularly when paired with home exercise programs such as P90X.',\\n 'Outers Universal 32-Piece Blow Molded Gun Cleaning Kit Outers Sports & Outdoors Hunting & Fishing Hunting Gun Maintenance Gun Cleaning Kits Outers now offers this rigid and durable hard case to stow and organize an assortment of gun care products to clean shotguns, rifles, or handguns, quickly and effectively.  Their blow molded design has a specific compartment for each piece within that kit.  Plus, the kit has additional compartments to hold bottles of gun-cleaning chemicals or any other gear you want to keep nearby.',\\n 'Power Hooks (Pair)   Sports & Outdoors Exercise & Fitness Power Hooks (Pair) at Power Systems, Inc.',\\n \\\"Pacific Play Tents Playchute 10' Parachute (Colors and Designs May Vary) PACIFIC PLAY TENTS Sports & Outdoors Other Sports Gymnastics Accessories Parachutes The parachute gambit is a teacher's secret weapon--a tried-and-true way to annihilate the afternoon blahs. At approximately 9 feet in diameter, with handles all the way around for little (or big) hands to grab, this sunny Playchute Parachute is just smaller than the classroom standard. Fling it into the air and watch it drift down. Run underneath it and watch the colors rain down. Throw a few stuffed animals in the center and bounce them around. 
Simple, satisfying thrills, and worth every penny.--Claire Dederer\\\",\\n 'Find Me 6ft Tunnel PACIFIC PLAY TENTS Sports & Outdoors Leisure Sports & Game Room Trampolines & Accessories Trampolines Find Me 6ft Tunnel. The Pacific Play Tents Find Me Tunnel is perfect for developing cooperative play and enhancing muscle development! This innovative design incorporates a new padded interior system for safer more comfortable playtime. Your little one will enjoy countless hours traveling from one destination to another! Wipes clean with a damp cloth. Features sturdy, hardened spring steel construction. Tunnel collapses flat for easy storage. For children ages 3 years and older. Materials: Polyester Dacron 600 x 300 weave with 600mm PU coating, Steel. Dimensions: 72\\\"L x 19\\\"W x 19\\\"H; 4 lbs',\\n 'Club Champ Super Sized Electic Putt N\\\\' Hazzard Putting Mat Club Champ Sports & Outdoors Golf Training Equipment Putting Mats Non-directional turf simulates real grass; regulation-size cup  is \\\"protected\\\" by sand trap and water hazard; realistic cup and golf hazards  return ball electronicallyLarge 9-by-16-inch putting surfaceUse at home, office, clubhouse, and partiesNo batteries required for operation --',\\n 'Victorinox Swiss Army SwissTool with Pouch Victorinox Sports & Outdoors Outdoor Gear Camping & Hiking Knives & Tools Folding Knives From the renowned company that created the Swiss Army knife a century ago comes this folding tool kit--neatly contained within a single implement that measures just 4-1/2 inches long and 1-1/3 inches thick, weighs just 10 ounces, and comes in a polyester/nylon belt pouch. Its rugged stainless-steel construction and ease of use make this tool kit is a tribute to the precision of Swiss engineering. Tools open individually (no clumping), lock in place, and fold with a push button. The SwissTool carries a lifetime warranty against defects. 
The tools include pliers; 2-, 3-, 5-, and 7-1/2-mm screwdrivers; Phillips head screwdriver; wire cutter; bottle opener; large knife blade; serrated blade; metal file; metal saw; wood saw; reamer/punch; chisel/scraper; crate opener; wire bender; wire stripper; wire scraper; can opener; 9-inch ruler; 230-cm ruler; electrical crimper; and lanyard hole. --Fred Brack']\"\n     },\n     \"execution_count\": 6,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"sentences = []\\n\",\n    \"for i, row in df.iterrows():\\n\",\n    \"    sen = row['title'] + ' ' + row['brand'] + ' '\\n\",\n    \"    cates = eval(row['categories'])\\n\",\n    \"    if isinstance(cates, list):\\n\",\n    \"        for c in cates[0]:\\n\",\n    \"            sen = sen + c + ' '\\n\",\n    \"    sen += row[desc_str]\\n\",\n    \"    sen = sen.replace('\\\\n', ' ')\\n\",\n    \"\\n\",\n    \"    sentences.append(sen)\\n\",\n    \"\\n\",\n    \"sentences[:10]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 7,\n   \"outputs\": [],\n   \"source\": [\n    \"\\n\",\n    \"course_list = df[i_id].tolist()\\n\",\n    \"#sentences = df[desc_str].tolist()\\n\",\n    \"\\n\",\n    \"assert course_list[-1] == len(course_list) - 1\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 8,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"text encoded!\\n\",\n      \"done!\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"# should `pip install sentence_transformers` first\\n\",\n    \"from sentence_transformers import SentenceTransformer\\n\",\n    \"\\n\",\n    \"model = 
SentenceTransformer('all-MiniLM-L6-v2')\\n\",\n    \"\\n\",\n    \"sentence_embeddings = model.encode(sentences)\\n\",\n    \"print('text encoded!')\\n\",\n    \"\\n\",\n    \"assert sentence_embeddings.shape[0] == df.shape[0]\\n\",\n    \"np.save(os.path.join(file_path, 'text_feat.npy'), sentence_embeddings)\\n\",\n    \"print('done!')\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 9,\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": \"array([[-0.12623426,  0.03341388, -0.01948772, ..., -0.1013338 ,\\n         0.0514545 ,  0.07334712],\\n       [ 0.0068029 ,  0.00055715, -0.03157376, ...,  0.03421347,\\n         0.02450724,  0.03113373],\\n       [-0.12395922,  0.05546276, -0.00272348, ..., -0.19819073,\\n         0.04171506,  0.05105354],\\n       ...,\\n       [-0.06516663,  0.04306812, -0.00357155, ...,  0.02348825,\\n        -0.02514204,  0.06650119],\\n       [ 0.05071206,  0.03823141, -0.04340539, ...,  0.00951272,\\n         0.05093095,  0.03292951],\\n       [-0.13305898,  0.07934257, -0.01714416, ..., -0.11284354,\\n        -0.00523037,  0.03694083]], dtype=float32)\"\n     },\n     \"execution_count\": 9,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"sentence_embeddings[:10]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 10,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"(18357, 384)\\n\"\n     ]\n    },\n    {\n     \"data\": {\n      \"text/plain\": \"array([[-0.12623426,  0.03341388, -0.01948772, ..., -0.1013338 ,\\n         0.0514545 ,  0.07334712],\\n       [ 0.0068029 ,  0.00055715, -0.03157376, ...,  0.03421347,\\n         0.02450724,  0.03113373],\\n       [-0.12395922,  0.05546276, -0.00272348, ..., -0.19819073,\\n         0.04171506,  
0.05105354],\\n       ...,\\n       [-0.06516663,  0.04306812, -0.00357155, ...,  0.02348825,\\n        -0.02514204,  0.06650119],\\n       [ 0.05071206,  0.03823141, -0.04340539, ...,  0.00951272,\\n         0.05093095,  0.03292951],\\n       [-0.13305898,  0.07934257, -0.01714416, ..., -0.11284354,\\n        -0.00523037,  0.03694083]], dtype=float32)\"\n     },\n     \"execution_count\": 10,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"load_txt_feat = np.load('text_feat.npy', allow_pickle=True)\\n\",\n    \"print(load_txt_feat.shape)\\n\",\n    \"load_txt_feat[:10]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"source\": [\n    \"# Image encoder (V0)，following LATTICE, averaging over for missed items\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 11,\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": \"   itemID        asin                                              title  \\\\\\n0       0  1881509818           Ghost Inc Glock Armorers Tool 3/32 Punch   \\n1       1  2094869245  5 LED Bicycle Rear Tail Red Bike Torch Laser B...   \\n2       2  7245456259  Black Mountain Products Single Resistance Band...   \\n3       3  7245456313  Black Mountain Products Resistance Band Set wi...   \\n4       4  B000002NUS  Outers Universal 32-Piece Blow Molded Gun Clea...   \\n\\n   price                                              imUrl  \\\\\\n0   9.99  http://ecx.images-amazon.com/images/I/21iMxsyD...   \\n1   8.26  http://ecx.images-amazon.com/images/I/51RtwnJw...   \\n2  10.49  http://ecx.images-amazon.com/images/I/411Ikpf1...   
\\n3  32.99  http://ecx.images-amazon.com/images/I/51FdHlZS...   \\n4  21.99  http://ecx.images-amazon.com/images/I/510GjWgd...   \\n\\n                                             related           brand  \\\\\\n0  {'also_bought': ['B000U3YWEM', 'B000U401J6', '...           Ghost   \\n1  {'also_bought': ['B0081O93N2', 'B00EYTCHJA', '...                   \\n2  {'also_bought': ['B00DDBS2JE', 'B00H1KNHE8', '...  Black Mountain   \\n3  {'also_bought': ['1612431712', 'B00GSBMW2Y', '...  Black Mountain   \\n4  {'also_bought': ['B000PW64JY', 'B0010KHNEU', '...          Outers   \\n\\n                                          categories  \\\\\\n0  [['Sports & Outdoors', 'Hunting & Fishing', 'H...   \\n1  [['Sports & Outdoors', 'Cycling', 'Lights & Re...   \\n2  [['Sports & Outdoors', 'Exercise & Fitness', '...   \\n3  [['Sports & Outdoors', 'Exercise & Fitness', '...   \\n4  [['Sports & Outdoors', 'Hunting & Fishing', 'H...   \\n\\n                           salesRank  \\\\\\n0  {'Sports &amp; Outdoors': 172909}   \\n1   {'Sports &amp; Outdoors': 14293}   \\n2    {'Sports &amp; Outdoors': 1010}   \\n3      {'Sports &amp; Outdoors': 15}   \\n4   {'Sports &amp; Outdoors': 26738}   \\n\\n                                         description  \\n0  Ghost Armorer Tool (1). The GAT is made with a...  \\n1  This newly-designed Laser tail light can emit ...  \\n2  Black Mountain Products single resistance band...  \\n3  [if gte mso 9]><xml> <o:OfficeDocumentSettings...  \\n4  Outers now offers this rigid and durable hard ...  
\",\n      \"text/html\": \"<div>\\n<style scoped>\\n    .dataframe tbody tr th:only-of-type {\\n        vertical-align: middle;\\n    }\\n\\n    .dataframe tbody tr th {\\n        vertical-align: top;\\n    }\\n\\n    .dataframe thead th {\\n        text-align: right;\\n    }\\n</style>\\n<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n  <thead>\\n    <tr style=\\\"text-align: right;\\\">\\n      <th></th>\\n      <th>itemID</th>\\n      <th>asin</th>\\n      <th>title</th>\\n      <th>price</th>\\n      <th>imUrl</th>\\n      <th>related</th>\\n      <th>brand</th>\\n      <th>categories</th>\\n      <th>salesRank</th>\\n      <th>description</th>\\n    </tr>\\n  </thead>\\n  <tbody>\\n    <tr>\\n      <th>0</th>\\n      <td>0</td>\\n      <td>1881509818</td>\\n      <td>Ghost Inc Glock Armorers Tool 3/32 Punch</td>\\n      <td>9.99</td>\\n      <td>http://ecx.images-amazon.com/images/I/21iMxsyD...</td>\\n      <td>{'also_bought': ['B000U3YWEM', 'B000U401J6', '...</td>\\n      <td>Ghost</td>\\n      <td>[['Sports &amp; Outdoors', 'Hunting &amp; Fishing', 'H...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 172909}</td>\\n      <td>Ghost Armorer Tool (1). 
The GAT is made with a...</td>\\n    </tr>\\n    <tr>\\n      <th>1</th>\\n      <td>1</td>\\n      <td>2094869245</td>\\n      <td>5 LED Bicycle Rear Tail Red Bike Torch Laser B...</td>\\n      <td>8.26</td>\\n      <td>http://ecx.images-amazon.com/images/I/51RtwnJw...</td>\\n      <td>{'also_bought': ['B0081O93N2', 'B00EYTCHJA', '...</td>\\n      <td></td>\\n      <td>[['Sports &amp; Outdoors', 'Cycling', 'Lights &amp; Re...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 14293}</td>\\n      <td>This newly-designed Laser tail light can emit ...</td>\\n    </tr>\\n    <tr>\\n      <th>2</th>\\n      <td>2</td>\\n      <td>7245456259</td>\\n      <td>Black Mountain Products Single Resistance Band...</td>\\n      <td>10.49</td>\\n      <td>http://ecx.images-amazon.com/images/I/411Ikpf1...</td>\\n      <td>{'also_bought': ['B00DDBS2JE', 'B00H1KNHE8', '...</td>\\n      <td>Black Mountain</td>\\n      <td>[['Sports &amp; Outdoors', 'Exercise &amp; Fitness', '...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 1010}</td>\\n      <td>Black Mountain Products single resistance band...</td>\\n    </tr>\\n    <tr>\\n      <th>3</th>\\n      <td>3</td>\\n      <td>7245456313</td>\\n      <td>Black Mountain Products Resistance Band Set wi...</td>\\n      <td>32.99</td>\\n      <td>http://ecx.images-amazon.com/images/I/51FdHlZS...</td>\\n      <td>{'also_bought': ['1612431712', 'B00GSBMW2Y', '...</td>\\n      <td>Black Mountain</td>\\n      <td>[['Sports &amp; Outdoors', 'Exercise &amp; Fitness', '...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 15}</td>\\n      <td>[if gte mso 9]&gt;&lt;xml&gt; &lt;o:OfficeDocumentSettings...</td>\\n    </tr>\\n    <tr>\\n      <th>4</th>\\n      <td>4</td>\\n      <td>B000002NUS</td>\\n      <td>Outers Universal 32-Piece Blow Molded Gun Clea...</td>\\n      <td>21.99</td>\\n      <td>http://ecx.images-amazon.com/images/I/510GjWgd...</td>\\n      <td>{'also_bought': ['B000PW64JY', 'B0010KHNEU', '...</td>\\n      <td>Outers</td>\\n      
<td>[['Sports &amp; Outdoors', 'Hunting &amp; Fishing', 'H...</td>\\n      <td>{'Sports &amp;amp; Outdoors': 26738}</td>\\n      <td>Outers now offers this rigid and durable hard ...</td>\\n    </tr>\\n  </tbody>\\n</table>\\n</div>\"\n     },\n     \"execution_count\": 11,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"df[:5]\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 12,\n   \"outputs\": [],\n   \"source\": [\n    \"import array\\n\",\n    \"\\n\",\n    \"def readImageFeatures(path):\\n\",\n    \"  f = open(path, 'rb')\\n\",\n    \"  while True:\\n\",\n    \"    asin = f.read(10).decode('UTF-8')\\n\",\n    \"    if asin == '': break\\n\",\n    \"    a = array.array('f')\\n\",\n    \"    a.fromfile(f, 4096)\\n\",\n    \"    yield asin, a.tolist()\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 13,\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"# of items not in processed image features: 180\\n\",\n      \"done!\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"\\n\",\n    \"img_data = readImageFeatures(\\\"image_features_Sports_and_Outdoors.b\\\")\\n\",\n    \"item2id = dict(zip(df['asin'], df['itemID']))\\n\",\n    \"\\n\",\n    \"feats = {}\\n\",\n    \"avg = []\\n\",\n    \"for d in img_data:\\n\",\n    \"    if d[0] in item2id:\\n\",\n    \"        feats[int(item2id[d[0]])] = d[1]\\n\",\n    \"        avg.append(d[1])\\n\",\n    \"avg = np.array(avg).mean(0).tolist()\\n\",\n    \"\\n\",\n    \"ret = []\\n\",\n    \"non_no = []\\n\",\n    \"for i in range(len(item2id)):\\n\",\n    \"    if i in feats:\\n\",\n    \"        ret.append(feats[i])\\n\",\n    \"    
else:\\n\",\n    \"        non_no.append(i)\\n\",\n    \"        ret.append(avg)\\n\",\n    \"\\n\",\n    \"print('# of items not in processed image features:', len(non_no))\\n\",\n    \"assert len(ret) == len(item2id)\\n\",\n    \"np.save('image_feat.npy', np.array(ret))\\n\",\n    \"np.savetxt(\\\"missed_img_itemIDs.csv\\\", non_no, delimiter =\\\",\\\", fmt ='%d')\\n\",\n    \"print('done!')\"\n   ],\n   \"metadata\": {\n    \"collapsed\": false,\n    \"pycharm\": {\n     \"name\": \"#%%\\n\"\n    }\n   }\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 2\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython2\",\n   \"version\": \"2.7.6\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 0\n}"
  },
  {
    "path": "preprocessing/README.md",
    "content": "# Preprocessing from raw data 从原始数据处理\n- The following preprocessing steps can be quite tedious. Please post issues if you cannot run the scripts.\n\n- datasets: [Amazon](http://jmcauley.ucsd.edu/data/amazon/links.html)  \n-- Rating file in `Files/Small subsets for experimentation`  \n-- Meta files in `Per-category files`, [metadata], [image features]  \n\nThere has been an issue with the dataset site lately, \nas it automatically redirects to an updated version of the dataset. \nKeep pressing `ESC` to stop the redirecting action.\n\n## Step by step\n1. Performing 5-core filtering, re-indexing - `run 0rating2inter.ipynb`\n2. Train/valid/test data splitting - `run 1spliting.ipynb`\n3. Reindexing feature IDs with generated IDs in step 1 - `run 2reindex-feat.ipynb`\n4. Encoding text/image features - `run 3feat-encoder.ipynb`\n5. Filling your data description file `*.yaml` under `src/configs/dataset` with the generated file names `*.inter`, `*-feat.npy`, etc.\n6. Specifying your evaluated dataset by cmd: `python -d sports -m BM3`.\n\n\n## DualGNN requires additional operation to generate the u-u graph\n1. Run `dualgnn-gen-u-u-matrix.py` on a dataset `baby`:  \n`python dualgnn-gen-u-u-matrix.py -d baby`\n2. The generated u-u graph should be located in the same dir as the dataset.\n"
  },
  {
    "path": "preprocessing/dualgnn-gen-u-u-matrix.py",
    "content": "# 对应于Preprocess-ml-imdb.py文件\n\n\nimport numpy as np\nfrom collections import defaultdict\nfrom tqdm import tqdm\nimport torch\nimport pandas as pd\nimport os\nimport yaml\nimport argparse\n\n\ndef gen_user_matrix(all_edge, no_users):\n    edge_dict = defaultdict(set)\n\n    for edge in all_edge:\n        user, item = edge\n        edge_dict[user].add(item)\n\n    min_user = 0             # 0\n    num_user = no_users      # in our case, users/items ids start from 1\n    user_graph_matrix = torch.zeros(num_user, num_user)\n    key_list = list(edge_dict.keys())\n    key_list.sort()\n    bar = tqdm(total=len(key_list))\n    for head in range(len(key_list)):\n        bar.update(1)\n        for rear in range(head+1, len(key_list)):\n            head_key = key_list[head]\n            rear_key = key_list[rear]\n            # print(head_key, rear_key)\n            item_head = edge_dict[head_key]\n            item_rear = edge_dict[rear_key]\n            # print(len(user_head.intersection(user_rear)))\n            inter_len = len(item_head.intersection(item_rear))\n            if inter_len > 0:\n                user_graph_matrix[head_key-min_user][rear_key-min_user] = inter_len\n                user_graph_matrix[rear_key-min_user][head_key-min_user] = inter_len\n    bar.close()\n\n    return user_graph_matrix\n\n\nif __name__ == \t'__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--dataset', '-d', type=str, default='games', help='name of dataset')\n    args = parser.parse_args()\n    dataset_name = args.dataset\n    print(f'Generating u-u matrix for {dataset_name} ...\\n')\n\n    config = {}\n    os.chdir('../src')\n    cur_dir = os.getcwd()\n    con_dir = os.path.join(cur_dir, 'configs') # get config dir\n    overall_config_file = os.path.join(con_dir, \"overall.yaml\")\n    dataset_config_file = os.path.join(con_dir, \"dataset\", \"{}.yaml\".format(dataset_name))\n    conf_files = [overall_config_file, dataset_config_file]\n    # 
load configs\n    for file in conf_files:\n        if os.path.isfile(file):\n            with open(file, 'r', encoding='utf-8') as f:\n                tmp_d = yaml.safe_load(f)\n                config.update(tmp_d)\n\n    dataset_path = os.path.abspath(config['data_path'] + dataset_name)\n    print('data path:\\t', dataset_path)\n    uid_field = config['USER_ID_FIELD']\n    iid_field = config['ITEM_ID_FIELD']\n    train_df = pd.read_csv(os.path.join(dataset_path, config['inter_file_name']), sep='\\t')\n    num_user = len(pd.unique(train_df[uid_field]))\n    train_df = train_df[train_df['x_label'] == 0].copy()\n    train_data = train_df[[uid_field, iid_field]].to_numpy()\n    # item_item_pairs =[]\n    user_graph_matrix = gen_user_matrix(train_data, num_user)\n    #####################################################################generate user-user matrix\n    # pdb.set_trace()\n    user_graph = user_graph_matrix\n    # user_num = torch.zeros(num_user)\n    user_num = torch.zeros(num_user)\n\n    user_graph_dict = {}\n    item_graph_dict = {}\n    edge_list_i = []\n    edge_list_j = []\n\n    for i in range(num_user):\n        user_num[i] = len(torch.nonzero(user_graph[i]))\n        print(\"this is \", i, \"num\", user_num[i])\n\n    for i in range(num_user):\n        if user_num[i] <= 200:\n            user_i = torch.topk(user_graph[i],int(user_num[i]))\n            edge_list_i =user_i.indices.numpy().tolist()\n            edge_list_j =user_i.values.numpy().tolist()\n            edge_list = [edge_list_i, edge_list_j]\n            user_graph_dict[i] = edge_list\n        else:\n            user_i = torch.topk(user_graph[i], 200)\n            edge_list_i = user_i.indices.numpy().tolist()\n            edge_list_j = user_i.values.numpy().tolist()\n            edge_list = [edge_list_i, edge_list_j]\n            user_graph_dict[i] = edge_list\n    # pdb.set_trace()\n    np.save(os.path.join(dataset_path, config['user_graph_dict_file']), user_graph_dict, 
allow_pickle=True)\n"
  },
  {
    "path": "requirements.txt",
    "content": "numpy==1.21.5\npandas==1.3.5\npython==3.7.11\nscipy==1.7.3\ntorch==1.11.0\npyyaml==6.0\n"
  },
  {
    "path": "src/common/abstract_recommender.py",
    "content": "# coding: utf-8\n# @email  : enoche.chow@gmail.com\n\nimport os\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass AbstractRecommender(nn.Module):\n    r\"\"\"Base class for all models\n    \"\"\"\n    def pre_epoch_processing(self):\n        pass\n\n    def post_epoch_processing(self):\n        pass\n\n    def calculate_loss(self, interaction):\n        r\"\"\"Calculate the training loss for a batch data.\n\n        Args:\n            interaction (Interaction): Interaction class of the batch.\n\n        Returns:\n            torch.Tensor: Training loss, shape: []\n        \"\"\"\n        raise NotImplementedError\n\n    def predict(self, interaction):\n        r\"\"\"Predict the scores between users and items.\n\n        Args:\n            interaction (Interaction): Interaction class of the batch.\n\n        Returns:\n            torch.Tensor: Predicted scores for given users and items, shape: [batch_size]\n        \"\"\"\n        raise NotImplementedError\n\n    def full_sort_predict(self, interaction):\n        r\"\"\"full sort prediction function.\n        Given users, calculate the scores between users and all candidate items.\n\n        Args:\n            interaction (Interaction): Interaction class of the batch.\n\n        Returns:\n            torch.Tensor: Predicted scores for given users and all candidate items,\n            shape: [n_batch_users * n_candidate_items]\n        \"\"\"\n        raise NotImplementedError\n    #\n    # def __str__(self):\n    #     \"\"\"\n    #     Model prints with number of trainable parameters\n    #     \"\"\"\n    #     model_parameters = filter(lambda p: p.requires_grad, self.parameters())\n    #     params = sum([np.prod(p.size()) for p in model_parameters])\n    #     return super().__str__() + '\\nTrainable parameters: {}'.format(params)\n\n    def __str__(self):\n        \"\"\"\n        Model prints with number of trainable parameters\n        \"\"\"\n        model_parameters = 
self.parameters()\n        params = sum([np.prod(p.size()) for p in model_parameters])\n        return super().__str__() + '\\nTrainable parameters: {}'.format(params)\n\n\nclass GeneralRecommender(AbstractRecommender):\n    \"\"\"This is a abstract general recommender. All the general model should implement this class.\n    The base general recommender class provide the basic dataset and parameters information.\n    \"\"\"\n    def __init__(self, config, dataloader):\n        super(GeneralRecommender, self).__init__()\n\n        # load dataset info\n        self.USER_ID = config['USER_ID_FIELD']\n        self.ITEM_ID = config['ITEM_ID_FIELD']\n        self.NEG_ITEM_ID = config['NEG_PREFIX'] + self.ITEM_ID\n        self.n_users = dataloader.dataset.get_user_num()\n        self.n_items = dataloader.dataset.get_item_num()\n\n        # load parameters info\n        self.batch_size = config['train_batch_size']\n        self.device = config['device']\n\n        # load encoded features here\n        self.v_feat, self.t_feat = None, None\n        if not config['end2end'] and config['is_multimodal_model']:\n            dataset_path = os.path.abspath(config['data_path'] + config['dataset'])\n            # if file exist?\n            v_feat_file_path = os.path.join(dataset_path, config['vision_feature_file'])\n            t_feat_file_path = os.path.join(dataset_path, config['text_feature_file'])\n            if os.path.isfile(v_feat_file_path):\n                self.v_feat = torch.from_numpy(np.load(v_feat_file_path, allow_pickle=True)).type(torch.FloatTensor).to(\n                    self.device)\n            if os.path.isfile(t_feat_file_path):\n                self.t_feat = torch.from_numpy(np.load(t_feat_file_path, allow_pickle=True)).type(torch.FloatTensor).to(\n                    self.device)\n\n            assert self.v_feat is not None or self.t_feat is not None, 'Features all NONE'\n"
  },
  {
    "path": "src/common/encoders.py",
    "content": "import copy\nimport math\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom common.abstract_recommender import GeneralRecommender\nimport scipy.sparse as sp\n\n\nclass LightGCN_Encoder(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(LightGCN_Encoder, self).__init__(config, dataset)\n        # load dataset info\n        self.interaction_matrix = dataset.inter_matrix(\n            form='coo').astype(np.float32)\n\n        self.user_count = self.n_users\n        self.item_count = self.n_items\n        self.latent_size = config['embedding_size']\n        self.n_layers = 3 if config['n_layers'] is None else config['n_layers']\n        self.layers = [self.latent_size] * self.n_layers\n\n        self.drop_ratio = 1.0\n        self.drop_flag = True\n\n        self.embedding_dict = self._init_model()\n        self.sparse_norm_adj = self.get_norm_adj_mat().to(self.device)\n\n    def _init_model(self):\n        initializer = nn.init.xavier_uniform_\n        embedding_dict = nn.ParameterDict({\n            'user_emb': nn.Parameter(initializer(torch.empty(self.user_count, self.latent_size))),\n            'item_emb': nn.Parameter(initializer(torch.empty(self.item_count, self.latent_size)))\n        })\n\n        return embedding_dict\n\n    def get_norm_adj_mat(self):\n        r\"\"\"Get the normalized interaction matrix of users and items.\n\n        Construct the square matrix from the training data and normalize it\n        using the laplace matrix.\n\n        .. 
math::\n            A_{hat} = D^{-0.5} \\times A \\times D^{-0.5}\n\n        Returns:\n            Sparse tensor of the normalized interaction matrix.\n        \"\"\"\n        # build adj matrix\n        A = sp.dok_matrix((self.n_users + self.n_items,\n                           self.n_users + self.n_items), dtype=np.float32)\n        inter_M = self.interaction_matrix\n        inter_M_t = self.interaction_matrix.transpose()\n        data_dict = dict(zip(zip(inter_M.row, inter_M.col+self.n_users),\n                             [1]*inter_M.nnz))\n        data_dict.update(dict(zip(zip(inter_M_t.row+self.n_users, inter_M_t.col),\n                                  [1]*inter_M_t.nnz)))\n        A._update(data_dict)\n        # norm adj matrix\n        sumArr = (A > 0).sum(axis=1)\n        # add epsilon to avoid Devide by zero Warning\n        diag = np.array(sumArr.flatten())[0] + 1e-7\n        diag = np.power(diag, -0.5)\n        D = sp.diags(diag)\n        L = D * A * D\n        # covert norm_adj matrix to tensor\n        L = sp.coo_matrix(L)\n        row = L.row\n        col = L.col\n        i = torch.LongTensor([row, col])\n        data = torch.FloatTensor(L.data)\n        SparseL = torch.sparse.FloatTensor(i, data, torch.Size(L.shape))\n        return SparseL\n\n    def sparse_dropout(self, x, rate, noise_shape):\n        random_tensor = 1 - rate\n        random_tensor += torch.rand(noise_shape).to(self.device)\n        dropout_mask = torch.floor(random_tensor).type(torch.bool)\n        i = x._indices()\n        v = x._values()\n\n        i = i[:, dropout_mask]\n        v = v[dropout_mask]\n\n        out = torch.sparse.FloatTensor(i, v, x.shape).to(self.device)\n        return out * (1. 
/ (1 - rate))\n\n    def forward(self, inputs):\n        A_hat = self.sparse_dropout(self.sparse_norm_adj,\n                                    np.random.random() * self.drop_ratio,\n                                    self.sparse_norm_adj._nnz()) if self.drop_flag else self.sparse_norm_adj\n\n        ego_embeddings = torch.cat([self.embedding_dict['user_emb'], self.embedding_dict['item_emb']], 0)\n        all_embeddings = [ego_embeddings]\n\n        for k in range(len(self.layers)):\n            ego_embeddings = torch.sparse.mm(A_hat, ego_embeddings)\n            all_embeddings += [ego_embeddings]\n\n        all_embeddings = torch.stack(all_embeddings, dim=1)\n        all_embeddings = torch.mean(all_embeddings, dim=1)\n\n        user_all_embeddings = all_embeddings[:self.user_count, :]\n        item_all_embeddings = all_embeddings[self.user_count:, :]\n\n        users, items = inputs[0], inputs[1]\n        user_embeddings = user_all_embeddings[users, :]\n        item_embeddings = item_all_embeddings[items, :]\n\n        return user_embeddings, item_embeddings\n\n    @torch.no_grad()\n    def get_embedding(self):\n        A_hat = self.sparse_norm_adj\n\n        ego_embeddings = torch.cat([self.embedding_dict['user_emb'], self.embedding_dict['item_emb']], 0)\n        all_embeddings = [ego_embeddings]\n\n        for k in range(len(self.layers)):\n            ego_embeddings = torch.sparse.mm(A_hat, ego_embeddings)\n            all_embeddings += [ego_embeddings]\n\n        all_embeddings = torch.stack(all_embeddings, dim=1)\n        all_embeddings = torch.mean(all_embeddings, dim=1)\n\n        user_all_embeddings = all_embeddings[:self.user_count, :]\n        item_all_embeddings = all_embeddings[self.user_count:, :]\n\n        return user_all_embeddings, item_all_embeddings"
  },
  {
    "path": "src/common/init.py",
    "content": "# coding: utf-8\n# @email  : enoche.chow@gmail.com\n\nimport torch.nn as nn\nfrom torch.nn.init import xavier_normal_, xavier_uniform_, constant_\n\n\ndef xavier_normal_initialization(module):\n    r\"\"\" using `xavier_normal_`_ in PyTorch to initialize the parameters in\n    nn.Embedding and nn.Linear layers. For bias in nn.Linear layers,\n    using constant 0 to initialize.\n\n    .. _`xavier_normal_`:\n        https://pytorch.org/docs/stable/nn.init.html?highlight=xavier_normal_#torch.nn.init.xavier_normal_\n\n    Examples:\n        >>> self.apply(xavier_normal_initialization)\n    \"\"\"\n    if isinstance(module, nn.Embedding):\n        xavier_normal_(module.weight.data)\n    elif isinstance(module, nn.Linear):\n        xavier_normal_(module.weight.data)\n        if module.bias is not None:\n            constant_(module.bias.data, 0)\n\n\ndef xavier_uniform_initialization(module):\n    r\"\"\" using `xavier_uniform_`_ in PyTorch to initialize the parameters in\n    nn.Embedding and nn.Linear layers. For bias in nn.Linear layers,\n    using constant 0 to initialize.\n\n    .. _`xavier_uniform_`:\n        https://pytorch.org/docs/stable/nn.init.html?highlight=xavier_uniform_#torch.nn.init.xavier_uniform_\n\n    Examples:\n        >>> self.apply(xavier_uniform_initialization)\n    \"\"\"\n    if isinstance(module, nn.Embedding) or isinstance(module, nn.Parameter):\n        xavier_uniform_(module.weight.data)\n    elif isinstance(module, nn.Linear):\n        xavier_uniform_(module.weight.data)\n        if module.bias is not None:\n            constant_(module.bias.data, 0)\n\n"
  },
  {
    "path": "src/common/loss.py",
    "content": "# coding: utf-8\n# @email  : enoche.chow@gmail.com\n\n\nimport torch\nimport torch.nn as nn\n\n\nclass BPRLoss(nn.Module):\n\n    \"\"\" BPRLoss, based on Bayesian Personalized Ranking\n\n    Args:\n        - gamma(float): Small value to avoid division by zero\n\n    Shape:\n        - Pos_score: (N)\n        - Neg_score: (N), same shape as the Pos_score\n        - Output: scalar.\n\n    Examples::\n\n        >>> loss = BPRLoss()\n        >>> pos_score = torch.randn(3, requires_grad=True)\n        >>> neg_score = torch.randn(3, requires_grad=True)\n        >>> output = loss(pos_score, neg_score)\n        >>> output.backward()\n    \"\"\"\n    def __init__(self, gamma=1e-10):\n        super(BPRLoss, self).__init__()\n        self.gamma = gamma\n\n    def forward(self, pos_score, neg_score):\n        loss = - torch.log(self.gamma + torch.sigmoid(pos_score - neg_score)).mean()\n        return loss\n\n\nclass EmbLoss(nn.Module):\n    \"\"\" EmbLoss, regularization on embeddings\n\n    \"\"\"\n    def __init__(self, norm=2):\n        super(EmbLoss, self).__init__()\n        self.norm = norm\n\n    def forward(self, *embeddings):\n        emb_loss = torch.zeros(1).to(embeddings[-1].device)\n        for embedding in embeddings:\n            emb_loss += torch.norm(embedding, p=self.norm)\n        emb_loss /= embeddings[-1].shape[0]\n        return emb_loss\n\n\nclass L2Loss(nn.Module):\n    def __init__(self):\n        super(L2Loss, self).__init__()\n\n    def forward(self, *embeddings):\n        l2_loss = torch.zeros(1).to(embeddings[-1].device)\n        for embedding in embeddings:\n            l2_loss += torch.sum(embedding**2)*0.5\n        return l2_loss"
  },
  {
    "path": "src/common/trainer.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\n\nr\"\"\"\n################################\n\"\"\"\n\nimport os\nimport itertools\nimport torch\nimport torch.optim as optim\nfrom torch.nn.utils.clip_grad import clip_grad_norm_\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom time import time\nfrom logging import getLogger\n\nfrom utils.utils import get_local_time, early_stopping, dict2str\nfrom utils.topk_evaluator import TopKEvaluator\n\n\nclass AbstractTrainer(object):\n    r\"\"\"Trainer Class is used to manage the training and evaluation processes of recommender system models.\n    AbstractTrainer is an abstract class in which the fit() and evaluate() method should be implemented according\n    to different training and evaluation strategies.\n    \"\"\"\n\n    def __init__(self, config, model):\n        self.config = config\n        self.model = model\n\n    def fit(self, train_data):\n        r\"\"\"Train the model based on the train data.\n\n        \"\"\"\n        raise NotImplementedError('Method [next] should be implemented.')\n\n    def evaluate(self, eval_data):\n        r\"\"\"Evaluate the model based on the eval data.\n\n        \"\"\"\n\n        raise NotImplementedError('Method [next] should be implemented.')\n\n\nclass Trainer(AbstractTrainer):\n    r\"\"\"The basic Trainer for basic training and evaluation strategies in recommender systems. This class defines common\n    functions for training and evaluation processes of most recommender system models, including fit(), evaluate(),\n   and some other features helpful for model training and evaluation.\n\n    Generally speaking, this class can serve most recommender system models, If the training process of the model is to\n    simply optimize a single loss without involving any complex training strategies, such as adversarial learning,\n    pre-training and so on.\n\n    Initializing the Trainer needs two parameters: `config` and `model`. 
`config` records the parameters information\n    for controlling training and evaluation, such as `learning_rate`, `epochs`, `eval_step` and so on.\n    More information can be found in [placeholder]. `model` is the instantiated object of a Model Class.\n\n    \"\"\"\n\n    def __init__(self, config, model, mg=False):\n        super(Trainer, self).__init__(config, model)\n\n        self.logger = getLogger()\n        self.learner = config['learner']\n        self.learning_rate = config['learning_rate']\n        self.epochs = config['epochs']\n        self.eval_step = min(config['eval_step'], self.epochs)\n        self.stopping_step = config['stopping_step']\n        self.clip_grad_norm = config['clip_grad_norm']\n        self.valid_metric = config['valid_metric'].lower()\n        self.valid_metric_bigger = config['valid_metric_bigger']\n        self.test_batch_size = config['eval_batch_size']\n        self.device = config['device']\n        self.weight_decay = 0.0\n        if config['weight_decay'] is not None:\n            wd = config['weight_decay']\n            self.weight_decay = eval(wd) if isinstance(wd, str) else wd\n\n        self.req_training = config['req_training']\n\n        self.start_epoch = 0\n        self.cur_step = 0\n\n        tmp_dd = {}\n        for j, k in list(itertools.product(config['metrics'], config['topk'])):\n            tmp_dd[f'{j.lower()}@{k}'] = 0.0\n        self.best_valid_score = -1\n        self.best_valid_result = tmp_dd\n        self.best_test_upon_valid = tmp_dd\n        self.train_loss_dict = dict()\n        self.optimizer = self._build_optimizer()\n\n        #fac = lambda epoch: 0.96 ** (epoch / 50)\n        lr_scheduler = config['learning_rate_scheduler']        # check zero?\n        fac = lambda epoch: lr_scheduler[0] ** (epoch / lr_scheduler[1])\n        scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=fac)\n        self.lr_scheduler = scheduler\n\n        self.eval_type = config['eval_type']\n        
self.evaluator = TopKEvaluator(config)\n\n        self.item_tensor = None\n        self.tot_item_num = None\n        self.mg = mg\n        self.alpha1 = config['alpha1']\n        self.alpha2 = config['alpha2']\n        self.beta = config['beta']\n\n    def _build_optimizer(self):\n        r\"\"\"Init the Optimizer\n\n        Returns:\n            torch.optim: the optimizer\n        \"\"\"\n        if self.learner.lower() == 'adam':\n            optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)\n        elif self.learner.lower() == 'sgd':\n            optimizer = optim.SGD(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)\n        elif self.learner.lower() == 'adagrad':\n            optimizer = optim.Adagrad(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)\n        elif self.learner.lower() == 'rmsprop':\n            optimizer = optim.RMSprop(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)\n        else:\n            self.logger.warning('Received unrecognized optimizer, set default Adam optimizer')\n            optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)\n        return optimizer\n\n    def _train_epoch(self, train_data, epoch_idx, loss_func=None):\n        r\"\"\"Train the model in an epoch\n\n        Args:\n            train_data (DataLoader): The train data.\n            epoch_idx (int): The current epoch id.\n            loss_func (function): The loss function of :attr:`model`. If it is ``None``, the loss function will be\n                :attr:`self.model.calculate_loss`. Defaults to ``None``.\n\n        Returns:\n            float/tuple: The sum of loss returned by all batches in this epoch. 
If the loss in each batch contains\n            multiple parts and the model return these multiple parts loss instead of the sum of loss, It will return a\n            tuple which includes the sum of loss in each part.\n        \"\"\"\n        if not self.req_training:\n            return 0.0, []\n        self.model.train()\n        loss_func = loss_func or self.model.calculate_loss\n        total_loss = None\n        loss_batches = []\n        for batch_idx, interaction in enumerate(train_data):\n            self.optimizer.zero_grad()\n            second_inter = interaction.clone()\n            losses = loss_func(interaction)\n            \n            if isinstance(losses, tuple):\n                loss = sum(losses)\n                loss_tuple = tuple(per_loss.item() for per_loss in losses)\n                total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))\n            else:\n                loss = losses\n                total_loss = losses.item() if total_loss is None else total_loss + losses.item()\n            if self._check_nan(loss):\n                self.logger.info('Loss is nan at epoch: {}, batch index: {}. Exiting.'.format(epoch_idx, batch_idx))\n                return loss, torch.tensor(0.0)\n            \n            if self.mg and batch_idx % self.beta == 0:\n                first_loss = self.alpha1 * loss\n                first_loss.backward()\n\n                self.optimizer.step()\n                self.optimizer.zero_grad()\n                \n                losses = loss_func(second_inter)\n                if isinstance(losses, tuple):\n                    loss = sum(losses)\n                else:\n                    loss = losses\n                    \n                if self._check_nan(loss):\n                    self.logger.info('Loss is nan at epoch: {}, batch index: {}. 
Exiting.'.format(epoch_idx, batch_idx))\n                    return loss, torch.tensor(0.0)\n                second_loss = -1 * self.alpha2 * loss\n                second_loss.backward()\n            else:\n                loss.backward()\n                \n            if self.clip_grad_norm:\n                clip_grad_norm_(self.model.parameters(), **self.clip_grad_norm)\n            self.optimizer.step()\n            loss_batches.append(loss.detach())\n            # for test\n            #if batch_idx == 0:\n            #    break\n        return total_loss, loss_batches\n\n    def _valid_epoch(self, valid_data):\n        r\"\"\"Valid the model with valid data\n\n        Args:\n            valid_data (DataLoader): the valid data\n\n        Returns:\n            float: valid score\n            dict: valid result\n        \"\"\"\n        valid_result = self.evaluate(valid_data)\n        valid_score = valid_result[self.valid_metric] if self.valid_metric else valid_result['NDCG@20']\n        return valid_score, valid_result\n\n    def _check_nan(self, loss):\n        if torch.isnan(loss):\n            #raise ValueError('Training loss is nan')\n            return True\n\n    def _generate_train_loss_output(self, epoch_idx, s_time, e_time, losses):\n        train_loss_output = 'epoch %d training [time: %.2fs, ' % (epoch_idx, e_time - s_time)\n        if isinstance(losses, tuple):\n            train_loss_output = ', '.join('train_loss%d: %.4f' % (idx + 1, loss) for idx, loss in enumerate(losses))\n        else:\n            train_loss_output += 'train loss: %.4f' % losses\n        return train_loss_output + ']'\n\n    def fit(self, train_data, valid_data=None, test_data=None, saved=False, verbose=True):\n        r\"\"\"Train the model based on the train data and the valid data.\n\n        Args:\n            train_data (DataLoader): the train data\n            valid_data (DataLoader, optional): the valid data, default: None.\n                                              
 If it's None, the early_stopping is invalid.\n            test_data (DataLoader, optional): None\n            verbose (bool, optional): whether to write training and evaluation information to logger, default: True\n            saved (bool, optional): whether to save the model parameters, default: True\n\n        Returns:\n             (float, dict): best valid score and best valid result. If valid_data is None, it returns (-1, None)\n        \"\"\"\n        for epoch_idx in range(self.start_epoch, self.epochs):\n            # train\n            training_start_time = time()\n            self.model.pre_epoch_processing()\n            train_loss, _ = self._train_epoch(train_data, epoch_idx)\n            if torch.is_tensor(train_loss):\n                # get nan loss\n                break\n            #for param_group in self.optimizer.param_groups:\n            #    print('======lr: ', param_group['lr'])\n            self.lr_scheduler.step()\n\n            self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss\n            training_end_time = time()\n            train_loss_output = \\\n                self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss)\n            post_info = self.model.post_epoch_processing()\n            if verbose:\n                self.logger.info(train_loss_output)\n                if post_info is not None:\n                    self.logger.info(post_info)\n\n            # eval: To ensure the test result is the best model under validation data, set self.eval_step == 1\n            if (epoch_idx + 1) % self.eval_step == 0:\n                valid_start_time = time()\n                valid_score, valid_result = self._valid_epoch(valid_data)\n                self.best_valid_score, self.cur_step, stop_flag, update_flag = early_stopping(\n                    valid_score, self.best_valid_score, self.cur_step,\n                    max_step=self.stopping_step, 
bigger=self.valid_metric_bigger)\n                valid_end_time = time()\n                valid_score_output = \"epoch %d evaluating [time: %.2fs, valid_score: %f]\" % \\\n                                     (epoch_idx, valid_end_time - valid_start_time, valid_score)\n                valid_result_output = 'valid result: \\n' + dict2str(valid_result)\n                # test\n                _, test_result = self._valid_epoch(test_data)\n                if verbose:\n                    self.logger.info(valid_score_output)\n                    self.logger.info(valid_result_output)\n                    self.logger.info('test result: \\n' + dict2str(test_result))\n                if update_flag:\n                    update_output = '██ ' + self.config['model'] + '--Best validation results updated!!!'\n                    if verbose:\n                        self.logger.info(update_output)\n                    self.best_valid_result = valid_result\n                    self.best_test_upon_valid = test_result\n\n                if stop_flag:\n                    stop_output = '+++++Finished training, best eval result in epoch %d' % \\\n                                  (epoch_idx - self.cur_step * self.eval_step)\n                    if verbose:\n                        self.logger.info(stop_output)\n                    break\n        return self.best_valid_score, self.best_valid_result, self.best_test_upon_valid\n\n\n    @torch.no_grad()\n    def evaluate(self, eval_data, is_test=False, idx=0):\n        r\"\"\"Evaluate the model based on the eval data.\n        Returns:\n            dict: eval result, key is the eval metric and value in the corresponding metric value\n        \"\"\"\n        self.model.eval()\n\n        # batch full users\n        batch_matrix_list = []\n        for batch_idx, batched_data in enumerate(eval_data):\n            # predict: interaction without item ids\n            scores = self.model.full_sort_predict(batched_data)\n            
masked_items = batched_data[1]\n            # mask out pos items\n            scores[masked_items[0], masked_items[1]] = -1e10\n            # rank and get top-k\n            _, topk_index = torch.topk(scores, max(self.config['topk']), dim=-1)  # nusers x topk\n            batch_matrix_list.append(topk_index)\n        return self.evaluator.evaluate(batch_matrix_list, eval_data, is_test=is_test, idx=idx)\n\n    def plot_train_loss(self, show=True, save_path=None):\n        r\"\"\"Plot the train loss in each epoch\n\n        Args:\n            show (bool, optional): whether to show this figure, default: True\n            save_path (str, optional): the data path to save the figure, default: None.\n                                       If it's None, it will not be saved.\n        \"\"\"\n        epochs = list(self.train_loss_dict.keys())\n        epochs.sort()\n        values = [float(self.train_loss_dict[epoch]) for epoch in epochs]\n        plt.plot(epochs, values)\n        plt.xticks(epochs)\n        plt.xlabel('Epoch')\n        plt.ylabel('Loss')\n        if show:\n            plt.show()\n        if save_path:\n            plt.savefig(save_path)\n\n"
  },
  {
    "path": "src/configs/dataset/baby.yaml",
    "content": "# Common Features\nUSER_ID_FIELD: userID\nITEM_ID_FIELD: itemID\n#RATING_FIELD: rating\nTIME_FIELD: timestamp\n\nfilter_out_cod_start_users: True\n\ninter_file_name: 'baby.inter'\n\n# name of features\nvision_feature_file: 'image_feat.npy'\ntext_feature_file: 'text_feat.npy'\nuser_graph_dict_file: 'user_graph_dict.npy'\n\nfield_separator: \"\\t\"\n"
  },
  {
    "path": "src/configs/dataset/clothing.yaml",
    "content": "# Common Features\nUSER_ID_FIELD: userID\nITEM_ID_FIELD: itemID\n#RATING_FIELD: rating\nTIME_FIELD: timestamp\n\nfilter_out_cod_start_users: True\n\ninter_file_name: 'clothing.inter'\n\n# name of features\nvision_feature_file: 'image_feat.npy'\ntext_feature_file: 'text_feat.npy'\nuser_graph_dict_file: 'user_graph_dict.npy'\n\n\nfield_separator: \"\\t\"\n"
  },
  {
    "path": "src/configs/dataset/elec.yaml",
    "content": "# Common Features\nUSER_ID_FIELD: userID\nITEM_ID_FIELD: itemID\n#RATING_FIELD: rating\nTIME_FIELD: timestamp\n\nfilter_out_cod_start_users: True\n\ninter_file_name: 'elec.inter'\n\n# name of features\nvision_feature_file: 'image_feat.npy'\ntext_feature_file: 'text_feat.npy'\nuser_graph_dict_file: 'user_graph_dict.npy'\n\nfield_separator: \"\\t\"\n"
  },
  {
    "path": "src/configs/dataset/microlens.yaml",
    "content": "# Common Features\nUSER_ID_FIELD: userID\nITEM_ID_FIELD: itemID\n#RATING_FIELD: rating\nTIME_FIELD: timestamp\n\nfilter_out_cod_start_users: True\n\ninter_file_name: 'microlens.inter'\n\n# name of features\nvision_feature_file: 'image_feat.npy'\ntext_feature_file: 'text_feat.npy'\nuser_graph_dict_file: 'user_graph_dict.npy'\n\nfield_separator: \"\\t\"\n"
  },
  {
    "path": "src/configs/dataset/sports.yaml",
    "content": "# Common Features\nUSER_ID_FIELD: userID\nITEM_ID_FIELD: itemID\n#RATING_FIELD: rating\nTIME_FIELD: timestamp\n\nfilter_out_cod_start_users: True\n\ninter_file_name: 'sports.inter'\n\n# name of features\nvision_feature_file: 'image_feat.npy'\ntext_feature_file: 'text_feat.npy'\nuser_graph_dict_file: 'user_graph_dict.npy'\n\nfield_separator: \"\\t\"\n"
  },
  {
    "path": "src/configs/mg.yaml",
    "content": "alpha1: [1.0]\nalpha2: [0.1, 0.2, 0.3]\nbeta: [3]\n\nhyper_parameters: [\"alpha1\", \"alpha2\", \"beta\"]\n"
  },
  {
    "path": "src/configs/model/BM3.yaml",
    "content": "embedding_size: 64\nfeat_embed_dim: 64\n\nn_layers: [1, 2]\ndropout: [0.3, 0.5]\nreg_weight: [0.1, 0.01]\ncl_weight: 2.0\n\nuse_neg_sampling: False\n\nhyper_parameters: [\"n_layers\", \"reg_weight\", \"dropout\"]\n\n"
  },
  {
    "path": "src/configs/model/BPR.yaml",
    "content": "embedding_size: 64\nis_multimodal_model: False\nreg_weight: [2.0, 1.0, 1e-01, 1e-02, 1e-03, 1e-04, 1e-05]\n\nhyper_parameters: [\"reg_weight\"]\n"
  },
  {
    "path": "src/configs/model/DAMRS.yaml",
    "content": "embedding_size: 64\nfeat_embed_dim: 64\nweight_size: [64, 64]\n\n\nkl_weight: [1] # [10, 0.1, 0.01] # [1] # [0, 0.1, 1e-02, 1e-04, 1e-03]\nneighbor_weight: [0.001] # [ 1, 0.1, 0.01, 0.001, 0.0001, 0] # [0.001] # [1, 0.1, 0.001, 0.0001]\n\nn_mm_layers: [1] # [1, 2]\nn_ui_layers: [2] # [1, 2, 3]\nknn_k: 10 # [3, 5, 10, 15, 20]\nlearning_rate: [0.001] # , 0.0005, 0.0001]\n\nitem_graph_dict_file: 'item_graph_dict_2.npy'\n\nhyper_parameters: [\"n_ui_layers\", \"neighbor_weight\", \"kl_weight\", \"n_mm_layers\", \"learning_rate\"]\n\n"
  },
  {
    "path": "src/configs/model/DRAGON.yaml",
    "content": "embedding_size: 64\nfeat_embed_dim: 64\n\nn_mm_layers: 1\nn_layers: 2\nknn_k: 10\nmm_image_weight: 0.1\naggr_mode: ['add']\nlearning_rate: [0.1, 0.01, 0.001, 0.0001, 0.00001]\nreg_weight: [0.1, 0.01, 0.001, 0.0001, 0.00001]\nhyper_parameters: [\"aggr_mode\", \"reg_weight\", \"learning_rate\"]"
  },
  {
    "path": "src/configs/model/DualGNN.yaml",
    "content": "embedding_size: 64\nn_layers: 2\naggr_mode: ['add']\nreg_weight: [0.1, 0.01, 0.001, 0.0001, 0.00001]\nlearning_rate: [0.1, 0.01, 0.001, 0.0001, 0.00001]\nhyper_parameters: [\"aggr_mode\", \"learning_rate\", \"reg_weight\"]\n\n"
  },
  {
    "path": "src/configs/model/FREEDOM.yaml",
    "content": "embedding_size: 64\nfeat_embed_dim: 64\nweight_size: [64, 64]\n\nlambda_coeff: 0.9\nreg_weight: [0.0, 1e-05, 1e-04, 1e-03]\n\nn_mm_layers: 1\nn_ui_layers: 2\nknn_k: 10\n\nmm_image_weight: 0.1\ndropout: [0.8, 0.9]\n\nhyper_parameters: [\"dropout\", \"reg_weight\"]\n"
  },
  {
    "path": "src/configs/model/GRCN.yaml",
    "content": "embedding_size: 64\nlatent_embedding: 64\n\nn_layers: 3\nreg_weight: [0.1, 0.01, 0.001, 0.0001, 0.00001]\nlearning_rate: [1, 0.1, 0.01, 0.001, 0.0001]\n\n\nhyper_parameters: [\"reg_weight\", \"learning_rate\"]"
  },
  {
    "path": "src/configs/model/ItemKNNCBF.yaml",
    "content": "\nknn_k: [10]\nshrink: [10]\nreq_training: False\nepochs: 1\nhyper_parameters: ['shrink', 'knn_k']\n\n"
  },
  {
    "path": "src/configs/model/LATTICE.yaml",
    "content": "embedding_size: 64\nfeat_embed_dim: 64\nweight_size: [64, 64]\n\nlearning_rate_scheduler: [0.96, 50]\nlambda_coeff: 0.9\nreg_weight: [0.0, 1e-05, 1e-04, 1e-03]\n\ncf_model: lightgcn\nmess_dropout: [0.1, 0.1]\nn_layers: 1\nknn_k: 10\n\nlearning_rate: [0.0001, 0.0005, 0.001, 0.005]\n\nhyper_parameters: [\"reg_weight\", \"learning_rate\"]\n\n"
  },
  {
    "path": "src/configs/model/LGMRec.yaml",
    "content": "embedding_size: 64\nfeat_embed_dim: 64\ncf_model: lightgcn\n\nn_ui_layers: [2]\nn_mm_layers: [2]\n\n#baby\nn_hyper_layer: [1]\nhyper_num: [4]\nkeep_rate: [0.5]\nalpha: [0.3]\n\n# #sports\n# n_hyper_layer: [1]\n# hyper_num: [4]\n# keep_rate: [0.4]\n# alpha: [0.6]\n\n# #clothing\n# n_hyper_layer: [2]\n# hyper_num: [64]\n# keep_rate: [0.2]\n# alpha: [0.2]\n\n\ncl_weight : [1e-04]\nreg_weight: [1e-06]\n\nhyper_parameters: [\"n_ui_layers\", \"n_mm_layers\", \"n_hyper_layer\", \"hyper_num\", \"keep_rate\",  \"alpha\", \"cl_weight\", \"reg_weight\"]"
  },
  {
    "path": "src/configs/model/LayerGCN.yaml",
    "content": "embedding_size: 64\nn_layers: [4]\nreg_weight: [1e-02, 1e-03, 1e-04, 1e-05]\ndropout: [0.0, 0.1, 0.2]\nhyper_parameters: [\"n_layers\", \"dropout\", \"reg_weight\"]\n"
  },
  {
    "path": "src/configs/model/LightGCN.yaml",
    "content": "embedding_size: 64\nis_multimodal_model: False\nn_layers: [1, 2, 3, 4]\nreg_weight: [1e-02, 1e-03, 1e-04, 1e-05, 1e-06]\nhyper_parameters: [\"n_layers\", \"reg_weight\"]\n\n"
  },
  {
    "path": "src/configs/model/MGCN.yaml",
    "content": "embedding_size: 64\nn_ui_layers: 2\nn_layers: 1\n\nlearning_rate_scheduler: [0.96, 50]\nlambda_coeff: 0.9\nreg_weight: 1e-04\n\nknn_k: 10\n\nlearning_rate: 0.001\n\ncl_loss: [0.001,0.01,0.1]\n\nhyper_parameters: [\"cl_loss\"]"
  },
  {
    "path": "src/configs/model/MMGCN.yaml",
    "content": "embedding_size: 64\nn_layers: 2\nreg_weight: [0, 0.00001, 0.0001, 0.001, 0.01, 0.1]\nlearning_rate: [0.0001, 0.0005, 0.001, 0.005, 0.01]\n\nhyper_parameters: [\"reg_weight\", \"learning_rate\"]\n\n"
  },
  {
    "path": "src/configs/model/MVGAE.yaml",
    "content": "embedding_size: 64\n\nn_layers: 1\n#reg_weight: [0.1, 0.01, 0.001, 0.0001, 0.00001]\nlearning_rate: [0.0001, 0.001, 0.01, 0.1]\nbeta: [0.01, 0.1, 1]\n\n\nhyper_parameters: [\"learning_rate\", \"beta\"]"
  },
  {
    "path": "src/configs/model/PGL.yaml",
    "content": "embedding_size: 64\nfeat_embed_dim: 64\nweight_size: [64, 64]\n\nlearning_rate_scheduler: [0.96, 50]\nlambda_coeff: 0.9\nlearning_rate: 0.001\n\nreg_weight: [0]\n\nn_mm_layers: 1\nn_ui_layers: 2\nknn_k: 10\n\nmm_image_weight: 0.1\ndropout: [0.2]\n\nmode: ['local']\n\nhyper_parameters: [\"dropout\",\"reg_weight\",\"mode\"]"
  },
  {
    "path": "src/configs/model/SELFCFED_LGN.yaml",
    "content": "embedding_size: 64\nn_layers: [1, 2]\ndropout: [0.1, 0.2, 0.5]\nreg_weight: [1e-01, 1e-02, 1e-03, 1e-04, 1e-05, 0.0]\nuse_neg_sampling: False\nhyper_parameters: [\"n_layers\", \"dropout\", \"reg_weight\"]\n\n"
  },
  {
    "path": "src/configs/model/SLMRec.yaml",
    "content": "recdim: 64\nlayer_num: 3\nreg: [0.0001, 0.001, 0.01, 0.1]\nssl_task: 'FAC'\nlearning_rate: [0.0001, 0.001, 0.01, 0.1]\nweight_decay: 1e-4\nssl_alpha: [0.01, 0.05, 0.1, 0.5, 1.0]\nssl_temp: [0.1, 0.2, 0.5, 1.0]\ndropout_rate: 0.3\nmm_fusion_mode: 'concat'\ntemp: 0.2\ninit: 'xavier'\nadj_type: 'pre'\nhyper_parameters: [\"learning_rate\", \"ssl_temp\", \"ssl_alpha\", \"reg\"]\n"
  },
  {
    "path": "src/configs/model/SMORE.yaml",
    "content": "embedding_size: 64\nn_ui_layers: [3,4]\nn_layers: 1\n\nlearning_rate_scheduler: [0.96, 50]\nlambda_coeff: 0.9\nreg_weight: [1e-5,1e-4]\nlearning_rate: 0.001\n\ncl_loss: 0.01 \ntemperature: 0.2\nimage_knn_k: [10,15,20,40]\ntext_knn_k: [10,15,20,40]\n\ndropout_rate : [0, 0.1]\n\nhyper_parameters: [\"n_ui_layers\", \"image_knn_k\", \"text_knn_k\", \"reg_weight\", \"dropout_rate\"]"
  },
  {
    "path": "src/configs/model/VBPR.yaml",
    "content": "embedding_size: 64\nreg_weight: [2.0, 1.0, 1e-01, 1e-02, 1e-03, 1e-04, 1e-05]\nhyper_parameters: [\"reg_weight\"]\n"
  },
  {
    "path": "src/configs/overall.yaml",
    "content": "# general\ngpu_id: 0\nuse_gpu: True\nseed: [999]\n\n# multi-modal raw features\ndata_path: '../data/'\ninter_splitting_label: 'x_label'\nfilter_out_cod_start_users: True\nis_multimodal_model: True\n\ncheckpoint_dir: 'saved'\nsave_recommended_topk: True\nrecommend_topk: 'recommend_topk/'\n\nembedding_size: 64\nweight_decay: 0.0\nreq_training: True\n#embedding_size: 3780\n\n# training settings\nepochs: 1000\nstopping_step: 20\ntrain_batch_size: 2048\nlearner: adam\nlearning_rate: 0.001\nlearning_rate_scheduler: [1.0, 50]\neval_step: 1\n\ntraining_neg_sample_num: 1\nuse_neg_sampling: True\nuse_full_sampling: False\nNEG_PREFIX: neg__\n\nUSER_ID_FIELD: user_id:token\nITEM_ID_FIELD: item_id:token\nTIME_FIELD: timestamp:float\nfield_separator: \"\\t\"\n\n\n# evaluation settings\nmetrics: [\"Recall\", \"NDCG\", \"Precision\", \"MAP\"]\ntopk: [5, 10, 20, 50]\nvalid_metric: Recall@20\neval_batch_size: 4096\n\n#\nuse_raw_features: False\nmax_txt_len: 32\nmax_img_size: 256\nvocab_size: 30522\ntype_vocab_size: 2\nhidden_size: 4\npad_token_id: 0\nmax_position_embeddings: 512\nlayer_norm_eps: 1e-12\nhidden_dropout_prob: 0.1\n\nend2end: False\n\n# iteration parameters\nhyper_parameters: [\"seed\"]\n"
  },
  {
    "path": "src/main.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\n\n\"\"\"\nMain entry\n# UPDATED: 2022-Feb-15\n##########################\n\"\"\"\n\nimport os\nimport argparse\nfrom utils.quick_start import quick_start\nos.environ['NUMEXPR_MAX_THREADS'] = '48'\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--model', '-m', type=str, default='SELFCFED_LGN', help='name of models')\n    parser.add_argument('--dataset', '-d', type=str, default='baby', help='name of datasets')\n\n    config_dict = {\n        'gpu_id': 0,\n    }\n\n    args, _ = parser.parse_known_args()\n\n    quick_start(model=args.model, dataset=args.dataset, config_dict=config_dict, save_model=True)\n\n\n"
  },
  {
    "path": "src/models/bm3.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\nr\"\"\"\n\n################################################\npaper:  Bootstrap Latent Representations for Multi-modal Recommendation\nhttps://arxiv.org/abs/2207.05969\n\"\"\"\nimport os\nimport copy\nimport random\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.functional import cosine_similarity\n\nfrom common.abstract_recommender import GeneralRecommender\nfrom common.loss import EmbLoss\n\n\nclass BM3(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(BM3, self).__init__(config, dataset)\n\n        self.embedding_dim = config['embedding_size']\n        self.feat_embed_dim = config['embedding_size']\n        self.n_layers = config['n_layers']\n        self.reg_weight = config['reg_weight']\n        self.cl_weight = config['cl_weight']\n        self.dropout = config['dropout']\n\n        self.n_nodes = self.n_users + self.n_items\n\n        # load dataset info\n        self.norm_adj = self.get_norm_adj_mat(dataset.inter_matrix(form='coo').astype(np.float32)).to(self.device)\n\n        self.user_embedding = nn.Embedding(self.n_users, self.embedding_dim)\n        self.item_id_embedding = nn.Embedding(self.n_items, self.embedding_dim)\n        nn.init.xavier_uniform_(self.user_embedding.weight)\n        nn.init.xavier_uniform_(self.item_id_embedding.weight)\n\n        self.predictor = nn.Linear(self.embedding_dim, self.embedding_dim)\n        self.reg_loss = EmbLoss()\n\n        nn.init.xavier_normal_(self.predictor.weight)\n\n        if self.v_feat is not None:\n            self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=False)\n            self.image_trs = nn.Linear(self.v_feat.shape[1], self.feat_embed_dim)\n            nn.init.xavier_normal_(self.image_trs.weight)\n        if self.t_feat is not None:\n            self.text_embedding = 
nn.Embedding.from_pretrained(self.t_feat, freeze=False)\n            self.text_trs = nn.Linear(self.t_feat.shape[1], self.feat_embed_dim)\n            nn.init.xavier_normal_(self.text_trs.weight)\n\n    def get_norm_adj_mat(self, interaction_matrix):\n        A = sp.dok_matrix((self.n_users + self.n_items,\n                           self.n_users + self.n_items), dtype=np.float32)\n        inter_M = interaction_matrix\n        inter_M_t = interaction_matrix.transpose()\n        data_dict = dict(zip(zip(inter_M.row, inter_M.col + self.n_users),\n                             [1] * inter_M.nnz))\n        data_dict.update(dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col),\n                                  [1] * inter_M_t.nnz)))\n        A._update(data_dict)\n        # norm adj matrix\n        sumArr = (A > 0).sum(axis=1)\n        # add epsilon to avoid Devide by zero Warning\n        diag = np.array(sumArr.flatten())[0] + 1e-7\n        diag = np.power(diag, -0.5)\n        D = sp.diags(diag)\n        L = D * A * D\n        # covert norm_adj matrix to tensor\n        L = sp.coo_matrix(L)\n        row = L.row\n        col = L.col\n        i = torch.LongTensor(np.array([row, col]))\n        data = torch.FloatTensor(L.data)\n\n        return torch.sparse.FloatTensor(i, data, torch.Size((self.n_nodes, self.n_nodes)))\n\n    def forward(self):\n        h = self.item_id_embedding.weight\n\n        ego_embeddings = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0)\n        all_embeddings = [ego_embeddings]\n        for i in range(self.n_layers):\n            ego_embeddings = torch.sparse.mm(self.norm_adj, ego_embeddings)\n            all_embeddings += [ego_embeddings]\n        all_embeddings = torch.stack(all_embeddings, dim=1)\n        all_embeddings = all_embeddings.mean(dim=1, keepdim=False)\n        u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0)\n        return u_g_embeddings, 
i_g_embeddings + h\n\n    def calculate_loss(self, interactions):\n        # online network\n        u_online_ori, i_online_ori = self.forward()\n        t_feat_online, v_feat_online = None, None\n        if self.t_feat is not None:\n            t_feat_online = self.text_trs(self.text_embedding.weight)\n        if self.v_feat is not None:\n            v_feat_online = self.image_trs(self.image_embedding.weight)\n\n        with torch.no_grad():\n            u_target, i_target = u_online_ori.clone(), i_online_ori.clone()\n            u_target.detach()\n            i_target.detach()\n            u_target = F.dropout(u_target, self.dropout)\n            i_target = F.dropout(i_target, self.dropout)\n\n            if self.t_feat is not None:\n                t_feat_target = t_feat_online.clone()\n                t_feat_target = F.dropout(t_feat_target, self.dropout)\n\n            if self.v_feat is not None:\n                v_feat_target = v_feat_online.clone()\n                v_feat_target = F.dropout(v_feat_target, self.dropout)\n\n        u_online, i_online = self.predictor(u_online_ori), self.predictor(i_online_ori)\n\n        users, items = interactions[0], interactions[1]\n        u_online = u_online[users, :]\n        i_online = i_online[items, :]\n        u_target = u_target[users, :]\n        i_target = i_target[items, :]\n\n        loss_t, loss_v, loss_tv, loss_vt = 0.0, 0.0, 0.0, 0.0\n        if self.t_feat is not None:\n            t_feat_online = self.predictor(t_feat_online)\n            t_feat_online = t_feat_online[items, :]\n            t_feat_target = t_feat_target[items, :]\n            loss_t = 1 - cosine_similarity(t_feat_online, i_target.detach(), dim=-1).mean()\n            loss_tv = 1 - cosine_similarity(t_feat_online, t_feat_target.detach(), dim=-1).mean()\n        if self.v_feat is not None:\n            v_feat_online = self.predictor(v_feat_online)\n            v_feat_online = v_feat_online[items, :]\n            v_feat_target = 
v_feat_target[items, :]\n            loss_v = 1 - cosine_similarity(v_feat_online, i_target.detach(), dim=-1).mean()\n            loss_vt = 1 - cosine_similarity(v_feat_online, v_feat_target.detach(), dim=-1).mean()\n\n        loss_ui = 1 - cosine_similarity(u_online, i_target.detach(), dim=-1).mean()\n        loss_iu = 1 - cosine_similarity(i_online, u_target.detach(), dim=-1).mean()\n\n        return (loss_ui + loss_iu).mean() + self.reg_weight * self.reg_loss(u_online_ori, i_online_ori) + \\\n               self.cl_weight * (loss_t + loss_v + loss_tv + loss_vt).mean()\n\n    def full_sort_predict(self, interaction):\n        user = interaction[0]\n        u_online, i_online = self.forward()\n        u_online, i_online = self.predictor(u_online), self.predictor(i_online)\n        score_mat_ui = torch.matmul(u_online[user], i_online.transpose(0, 1))\n        return score_mat_ui\n\n"
  },
  {
    "path": "src/models/bpr.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\nr\"\"\"\nBPR, with only u-i graph\n################################################\nReference:\n    Steffen Rendle et al. \"BPR: Bayesian Personalized Ranking from Implicit Feedback.\" in UAI 2009.\n\"\"\"\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom common.abstract_recommender import GeneralRecommender\nfrom common.loss import BPRLoss, EmbLoss\nfrom common.init import xavier_normal_initialization\nimport torch.nn.functional as F\nfrom torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout\n\n\nclass BPR(GeneralRecommender):\n    r\"\"\"BPR is a basic matrix factorization model that be trained in the pairwise way.\n\n    \"\"\"\n    def __init__(self, config, dataset):\n        super(BPR, self).__init__(config, dataset)\n\n        # load parameters info\n        self.embedding_size = config['embedding_size']\n        self.reg_weight = config['reg_weight']  # float32 type: the weight decay for l2 normalizaton\n\n        # define layers and loss\n        self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)\n        self.item_embedding = nn.Embedding(self.n_items, self.embedding_size)\n        self.loss = BPRLoss()\n        self.reg_loss = EmbLoss()\n\n        # parameters initialization\n        self.apply(xavier_normal_initialization)\n\n    def get_user_embedding(self, user):\n        r\"\"\" Get a batch of user embedding tensor according to input user's id.\n\n        Args:\n            user (torch.LongTensor): The input tensor that contains user's id, shape: [batch_size, ]\n\n        Returns:\n            torch.FloatTensor: The embedding tensor of a batch of user, shape: [batch_size, embedding_size]\n        \"\"\"\n        return self.user_embedding(user)\n\n    def get_item_embedding(self, item):\n        r\"\"\" Get a batch of item embedding tensor according to input item's id.\n\n        Args:\n  
          item (torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]\n\n        Returns:\n            torch.FloatTensor: The embedding tensor of a batch of item, shape: [batch_size, embedding_size]\n        \"\"\"\n        return self.item_embedding(item)\n\n    def forward(self, dropout=0.0):\n        user_e = F.dropout(self.user_embedding.weight, dropout)\n        item_e = F.dropout(self.item_embedding.weight, dropout)\n        return user_e, item_e\n\n    def calculate_loss(self, interaction):\n        \"\"\"\n        loss on one batch\n        :param interaction:\n            batch data format: tensor(3, batch_size)\n            [0]: user list; [1]: positive items; [2]: negative items\n        :return:\n        \"\"\"\n        user = interaction[0]\n        pos_item = interaction[1]\n        neg_item = interaction[2]\n\n        user_embeddings, item_embeddings = self.forward()\n        user_e = user_embeddings[user, :]\n        pos_e = item_embeddings[pos_item, :]\n        neg_e = self.get_item_embedding(neg_item)\n        pos_item_score, neg_item_score = torch.mul(user_e, pos_e).sum(dim=1), torch.mul(user_e, neg_e).sum(dim=1)\n        mf_loss = self.loss(pos_item_score, neg_item_score)\n        reg_loss = self.reg_loss(user_e, pos_e, neg_e)\n        loss = mf_loss + self.reg_weight * reg_loss\n        return loss\n\n    def full_sort_predict(self, interaction):\n        user = interaction[0]\n        user_e = self.get_user_embedding(user)\n        all_item_e = self.item_embedding.weight\n        score = torch.matmul(user_e, all_item_e.transpose(0, 1))    # n_users * n_items\n\n        return score\n"
  },
  {
    "path": "src/models/damrs.py",
    "content": "# coding: utf-8\n\nimport os\nimport random\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom common.abstract_recommender import GeneralRecommender\n\n\nclass DAMRS(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(DAMRS, self).__init__(config, dataset)\n\n        self.embedding_dim = config['embedding_size']\n\n        self.lambda_coeff = config['lambda_coeff']\n        self.cf_model = config['cf_model']\n\n        self.knn_k = config['knn_k']\n        self.n_layers = config['n_mm_layers']\n\n        self.n_ui_layers = config['n_ui_layers']\n        self.reg_weight = config['reg_weight']\n        self.kl_weight = config['kl_weight']\n        self.neighbor_weight = config['neighbor_weight']\n        self.build_item_graph = True\n\n        self.n_nodes = self.n_users + self.n_items\n\n        # load dataset info\n        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32)\n        self.norm_adj = self.get_norm_adj_mat().to(self.device)\n\n        self.user_embedding = nn.Embedding(self.n_users, self.embedding_dim)\n        self.item_id_embedding = nn.Embedding(self.n_items, self.embedding_dim)\n        nn.init.xavier_uniform_(self.user_embedding.weight)\n        nn.init.xavier_uniform_(self.item_id_embedding.weight)\n\n        if self.v_feat is not None:\n            self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=True)\n            self.image_trs = nn.Linear(self.v_feat.shape[1], self.embedding_dim)\n        if self.t_feat is not None:\n            self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=True)\n            self.text_trs = nn.Linear(self.t_feat.shape[1], self.embedding_dim)\n\n        self.image_adj, self.text_adj = self.get_knn_adj_mat(self.image_embedding.weight.detach(),\n                                                             
self.text_embedding.weight.detach())\n\n        dataset_path = os.path.abspath(config['data_path'] + config['dataset'])\n        self.item_graph_dict = np.load(os.path.join(dataset_path, config['item_graph_dict_file']),\n                                       allow_pickle=True).item()\n\n        __, self.session_adj = self.get_session_adj()\n\n    def get_knn_adj_mat(self, v_embeddings, t_embeddings):\n        v_context_norm = v_embeddings.div(torch.norm(v_embeddings, p=2, dim=-1, keepdim=True))\n        v_sim = torch.mm(v_context_norm, v_context_norm.transpose(1, 0))\n\n        t_context_norm = t_embeddings.div(torch.norm(t_embeddings, p=2, dim=-1, keepdim=True))\n        t_sim = torch.mm(t_context_norm, t_context_norm.transpose(1, 0))\n\n        mask_v = v_sim < v_sim.mean()\n        mask_t = t_sim < t_sim.mean()\n\n        t_sim[mask_v] = 0\n        v_sim[mask_t] = 0\n        t_sim[mask_t] = 0\n        v_sim[mask_v] = 0\n\n        index_x = []\n        index_v = []\n        index_t = []\n\n        all_items = np.arange(self.n_items).tolist()\n\n        def _random():\n            rd_id = random.sample(all_items, 9)  # [0]\n            return rd_id\n\n        for i in range(self.n_items):\n            item_num = len(torch.nonzero(t_sim[i]))\n            if item_num <= self.knn_k:\n                _, v_knn_ind = torch.topk(v_sim[i], item_num)\n                _, t_knn_ind = torch.topk(t_sim[i], item_num)\n            else:\n                _, v_knn_ind = torch.topk(v_sim[i], self.knn_k)\n                _, t_knn_ind = torch.topk(t_sim[i], self.knn_k)\n\n            index_x.append(torch.ones_like(v_knn_ind) * i)\n            index_v.append(v_knn_ind)\n            index_t.append(t_knn_ind)\n\n        index_x = torch.cat(index_x, dim=0).cuda()\n        index_v = torch.cat(index_v, dim=0).cuda()\n        index_t = torch.cat(index_t, dim=0).cuda()\n\n        adj_size = (self.n_items, self.n_items)\n        del v_sim, t_sim\n\n        v_indices = 
torch.stack((torch.flatten(index_x), torch.flatten(index_v)), 0)\n        t_indices = torch.stack((torch.flatten(index_x), torch.flatten(index_t)), 0)\n        # norm\n        return self.compute_normalized_laplacian(v_indices, adj_size), self.compute_normalized_laplacian(t_indices,\n                                                                                                         adj_size)\n\n    def compute_normalized_laplacian(self, indices, adj_size):\n        adj = torch.sparse.FloatTensor(indices, torch.ones_like(indices[0]), adj_size)\n        row_sum = 1e-7 + torch.sparse.sum(adj, -1).to_dense()\n        r_inv_sqrt = torch.pow(row_sum, -0.5)\n        rows_inv_sqrt = r_inv_sqrt[indices[0]]\n        cols_inv_sqrt = r_inv_sqrt[indices[1]]\n        values = rows_inv_sqrt * cols_inv_sqrt\n        return torch.sparse.FloatTensor(indices, values, adj_size)\n\n    def get_session_adj(self):\n        index_x = []\n        index_y = []\n        values = []\n        for i in range(self.n_items):\n            index_x.append(i)\n            index_y.append(i)\n            values.append(1)\n            if i in self.item_graph_dict.keys():\n                item_graph_sample = self.item_graph_dict[i][0]\n                item_graph_weight = self.item_graph_dict[i][1]\n\n                for j in range(len(item_graph_sample)):\n                    index_x.append(i)\n                    index_y.append(item_graph_sample[j])\n                    values.append(item_graph_weight[j])\n        index_x = torch.tensor(index_x, dtype=torch.long)\n        index_y = torch.tensor(index_y, dtype=torch.long)\n        indices = torch.stack((index_x, index_y), 0).to(self.device)\n        # norm\n        return indices, self.compute_normalized_laplacian(indices, (self.n_items, self.n_items))\n\n    def label_prediction(self, emb, aug_emb):\n        n_emb = F.normalize(emb, dim=1)\n        n_aug_emb = F.normalize(aug_emb, dim=1)\n        prob = torch.mm(n_emb, n_aug_emb.transpose(0, 1))\n  
      prob = F.softmax(prob, dim=1)\n        del n_emb, n_aug_emb\n        return prob\n\n    def generate_pesudo_labels(self, prob1, prob2, prob3):\n        positive = prob1 + prob2 + prob3 + prob3\n        _, mm_pos_ind = torch.topk(positive, 10, dim=-1)\n        prob = prob3.clone()\n        prob.scatter_(1, mm_pos_ind, 0)\n        _, single_pos_ind = torch.topk(prob, 10, dim=-1)\n        return mm_pos_ind, single_pos_ind\n\n    def neighbor_discrimination(self, mm_positive, s_positive, emb, aug_emb, temperature=0.2):\n        def score(x1, x2):\n            return torch.sum(torch.mul(x1, x2), dim=2)\n\n        n_aug_emb = F.normalize(aug_emb, dim=1)\n        n_emb = F.normalize(emb, dim=1)\n\n        mm_pos_emb = n_aug_emb[mm_positive]\n        s_pos_emb = n_aug_emb[s_positive]\n\n        emb2 = torch.reshape(n_emb, [-1, 1, self.embedding_dim])\n        emb2 = torch.tile(emb2, [1, 10, 1])\n\n        mm_pos_score = score(emb2, mm_pos_emb)\n        s_pos_score = score(emb2, s_pos_emb)\n        ttl_score = torch.matmul(n_emb, n_aug_emb.transpose(0, 1))\n\n        mm_pos_score = torch.sum(torch.exp(mm_pos_score / temperature), dim=1)\n        s_pos_score = torch.sum(torch.exp(s_pos_score / temperature), dim=1)\n        ttl_score = torch.exp(ttl_score / temperature).sum(dim=1)  # 1\n\n        cl_loss = - torch.log(mm_pos_score / (ttl_score) + 10e-10) - torch.log(\n            s_pos_score / (ttl_score - mm_pos_score) + 10e-10)\n        return torch.mean(cl_loss)\n\n    def KL(self, p1, p2):\n        return p1 * torch.log(p1) - p1 * torch.log(p2) + \\\n               (1 - p1) * torch.log(1 - p1) - (1 - p1) * torch.log(1 - p2)\n\n    def get_norm_adj_mat(self):\n        A = sp.dok_matrix((self.n_users + self.n_items,\n                           self.n_users + self.n_items), dtype=np.float32)\n        inter_M = self.interaction_matrix\n        inter_M_t = self.interaction_matrix.transpose()\n        data_dict = dict(zip(zip(inter_M.row, inter_M.col + self.n_users),\n    
                         [1] * inter_M.nnz))\n        data_dict.update(dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col),\n                                  [1] * inter_M_t.nnz)))\n        A._update(data_dict)\n        # norm adj matrix\n        sumArr = (A > 0).sum(axis=1)\n        # add epsilon to avoid Devide by zero Warning\n        diag = np.array(sumArr.flatten())[0] + 1e-7\n        diag = np.power(diag, -0.5)\n        D = sp.diags(diag)\n        L = D * A * D\n        # covert norm_adj matrix to tensor\n        L = sp.coo_matrix(L)\n        row = L.row\n        col = L.col\n        i = torch.LongTensor(np.array([row, col]))\n        data = torch.FloatTensor(L.data)\n\n        return torch.sparse.FloatTensor(i, data, torch.Size((self.n_nodes, self.n_nodes)))\n\n    def forward(self):\n        ego_embeddings = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0)\n        all_embeddings = [ego_embeddings]\n        for i in range(self.n_ui_layers):\n            side_embeddings = torch.sparse.mm(self.norm_adj, ego_embeddings)\n            ego_embeddings = side_embeddings\n            all_embeddings += [ego_embeddings]\n        all_embeddings = torch.stack(all_embeddings, dim=1)\n        all_embeddings = all_embeddings.mean(dim=1, keepdim=False)\n        u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0)\n\n        del ego_embeddings, side_embeddings\n\n        # text emb\n        h_t = self.item_id_embedding.weight.clone()\n        for i in range(self.n_layers):\n            h_t = torch.sparse.mm(self.text_adj, h_t)\n\n        # image emb\n        h_v = self.item_id_embedding.weight.clone()\n        for i in range(self.n_layers):\n            h_v = torch.sparse.mm(self.image_adj, h_v)\n\n        # session emb\n        h_s = self.item_id_embedding.weight.clone()\n        for i in range(self.n_layers):\n            h_s = torch.sparse.mm(self.session_adj, h_s)\n\n        return 
u_g_embeddings, i_g_embeddings, h_t, h_v, h_s\n\n    def calculate_loss(self, interaction):\n        users = interaction[0]\n        pos_items = interaction[1]\n        neg_items = interaction[2]\n\n        user_embeddings, item_embeddings, h_t, h_v, h_s = self.forward()\n        self.build_item_graph = False\n\n        u_idx = torch.unique(users, return_inverse=True, sorted=False)\n        i_idx = torch.unique(torch.cat((pos_items, neg_items)), return_inverse=True, sorted=False)\n        u_id = u_idx[0]\n        i_id = i_idx[0]\n\n        # text\n        label_prediction_t = self.label_prediction(h_t[i_id], h_t)\n        # visual\n        label_prediction_v = self.label_prediction(h_v[i_id], h_v)\n        # session\n        label_prediction_s = self.label_prediction(h_s[i_id], h_s)\n\n        mm_postive_s, s_postive_s = self.generate_pesudo_labels(label_prediction_t, label_prediction_v,\n                                                                label_prediction_s)\n        neighbor_dis_loss_1 = self.neighbor_discrimination(mm_postive_s, s_postive_s, h_s[i_id], h_s)\n\n        mm_postive_v, s_postive_v = self.generate_pesudo_labels(label_prediction_t, label_prediction_s,\n                                                                label_prediction_v)\n        neighbor_dis_loss_2 = self.neighbor_discrimination(mm_postive_v, s_postive_v, h_v[i_id], h_v)\n\n        mm_postive_t, s_postive_t = self.generate_pesudo_labels(label_prediction_v, label_prediction_s,\n                                                                label_prediction_t)\n        neighbor_dis_loss_3 = self.neighbor_discrimination(mm_postive_t, s_postive_t, h_t[i_id], h_t)\n\n        neighbor_dis_loss = (neighbor_dis_loss_1 + neighbor_dis_loss_2 + neighbor_dis_loss_3) / 3.0\n\n        n_u_g_embeddings = user_embeddings[u_id]\n        it_embeddings = (h_t + h_s + h_v) / 3.0\n\n        p_g = F.sigmoid(torch.matmul(n_u_g_embeddings, F.normalize(item_embeddings[i_id], dim=-1).transpose(0, 
1)))\n        p_t = F.sigmoid(torch.matmul(n_u_g_embeddings, F.normalize(it_embeddings[i_id], dim=-1).transpose(0, 1)))\n\n        KL_loss = torch.mean(self.KL(p_g, p_t) + self.KL(p_t, p_g))\n\n        p_weight, n_weight = self.get_weight_modal(users, pos_items, neg_items, user_embeddings, h_t, h_v, h_s)\n\n        u_g_embeddings = user_embeddings[users]\n        ia_embeddings = item_embeddings + (h_t + h_v + h_s) / 3.0\n        pos_i_g_embeddings = ia_embeddings[pos_items]\n        neg_i_g_embeddings = ia_embeddings[neg_items]\n\n        batch_mf_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings, p_weight, n_weight)\n\n        return batch_mf_loss + self.neighbor_weight * (neighbor_dis_loss) + KL_loss * self.kl_weight\n\n    def full_sort_predict(self, interaction):\n        user = interaction[0]\n        user_embeddings, item_embeddings, h_t, h_v, h_s = self.forward()  #\n\n        user_e = user_embeddings[user, :]\n        i_embedding = (h_v + h_t + h_s) / 3.0\n        all_item_e = item_embeddings + i_embedding\n        score = torch.matmul(user_e, all_item_e.transpose(0, 1))\n        return score\n\n    def get_weight_modal(self, users, pos_items, neg_items, user_embeddings, h_t, h_v, h_s):\n        u_g_embeddings = user_embeddings[users]\n\n        p_t = torch.sum(torch.mul(u_g_embeddings, F.normalize(h_t[pos_items], dim=-1)), dim=1)\n        p_v = torch.sum(torch.mul(u_g_embeddings, F.normalize(h_s[pos_items], dim=-1)), dim=1)\n        p_s = torch.sum(torch.mul(u_g_embeddings, F.normalize(h_v[pos_items], dim=-1)), dim=1)\n\n        n_t = torch.sum(torch.mul(u_g_embeddings, F.normalize(h_t[neg_items], dim=-1)), dim=1)\n        n_v = torch.sum(torch.mul(u_g_embeddings, F.normalize(h_s[neg_items], dim=-1)), dim=1)\n        n_s = torch.sum(torch.mul(u_g_embeddings, F.normalize(h_v[neg_items], dim=-1)), dim=1)\n\n        p_tensor = F.sigmoid(torch.stack([p_t, p_v, p_s]))\n        p_variance = torch.var(p_tensor, dim=0).data\n        
p_mean_value = torch.mean(p_tensor, dim=0).data\n        p_max_value, _ = torch.max(p_tensor, dim=0)\n\n        n_tensor = F.sigmoid(torch.stack([n_t, n_v, n_s]))\n        n_mean_value = torch.mean(n_tensor).data\n\n        p_mean_probability = torch.pow(p_mean_value, 1.0).data\n        p_var_probability = torch.pow(torch.exp(-p_variance).data, 2.0)  # 0 ~ 1\n        pos_weight = p_mean_probability * p_var_probability\n        pos_weight = torch.clamp(pos_weight, 0, 1).data\n\n        mask = torch.zeros_like(p_mean_value)\n        mask[p_mean_value < n_mean_value] = 1\n\n        neg_weight_max = torch.pow((p_max_value - n_mean_value.data), 1.0) * mask\n        neg_weight = torch.clamp(neg_weight_max, 0, 1).data\n        # print(neg_weight)\n\n        return pos_weight, neg_weight\n\n    def bpr_loss(self, users, pos_items, neg_items, p_weight, n_weight):\n        pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)\n        neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)\n\n        p_maxi = torch.log(F.sigmoid(pos_scores - neg_scores)) * p_weight\n        n_maxi = torch.log(F.sigmoid(neg_scores - pos_scores)) * n_weight\n        mf_loss = -torch.mean(p_maxi + n_maxi)\n        # mf_loss = -torch.sum(maxi)\n        return mf_loss"
  },
  {
    "path": "src/models/dragon.py",
    "content": "# coding: utf-8\n#\n# user-graph need to be generated by the following script\n# tools/generate-u-u-matrix.py\nimport os\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.utils import remove_self_loops, add_self_loops, degree\nimport torch_geometric\n\nfrom common.abstract_recommender import GeneralRecommender\nfrom common.loss import BPRLoss, EmbLoss\nfrom common.init import xavier_uniform_initialization\n\n\nclass DRAGON(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(DRAGON, self).__init__(config, dataset)\n\n        num_user = self.n_users\n        num_item = self.n_items\n        batch_size = config['train_batch_size']  # not used\n        dim_x = config['embedding_size']\n        self.feat_embed_dim = config['feat_embed_dim']\n        self.n_layers = config['n_mm_layers']\n        self.knn_k = config['knn_k']\n        self.mm_image_weight = config['mm_image_weight']\n        has_id = True\n\n        self.batch_size = batch_size\n        self.num_user = num_user\n        self.num_item = num_item\n        self.k = 40\n        self.aggr_mode = config['aggr_mode']\n        self.user_aggr_mode = 'softmax'\n        self.num_layer = 1\n        self.cold_start = 0\n        self.dataset = dataset\n        # self.construction = 'weighted_max'\n        self.construction = 'cat'\n        self.reg_weight = config['reg_weight']\n        self.drop_rate = 0.1\n        self.v_rep = None\n        self.t_rep = None\n        self.v_preference = None\n        self.t_preference = None\n        self.dim_latent = 64\n        self.dim_feat = 128\n        self.MLP_v = nn.Linear(self.dim_latent, self.dim_latent, bias=False)\n        self.MLP_t = nn.Linear(self.dim_latent, self.dim_latent, bias=False)\n        self.mm_adj = None\n\n        dataset_path = os.path.abspath(config['data_path'] + 
config['dataset'])\n        self.user_graph_dict = np.load(os.path.join(dataset_path, config['user_graph_dict_file']),\n                                       allow_pickle=True).item()\n\n        mm_adj_file = os.path.join(dataset_path, 'mm_adj_{}.pt'.format(self.knn_k))\n\n        if self.v_feat is not None:\n            self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=False)\n            self.image_trs = nn.Linear(self.v_feat.shape[1], self.feat_embed_dim)\n        if self.t_feat is not None:\n            self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=False)\n            self.text_trs = nn.Linear(self.t_feat.shape[1], self.feat_embed_dim)\n\n        if os.path.exists(mm_adj_file):\n            self.mm_adj = torch.load(mm_adj_file)\n        else:\n            if self.v_feat is not None:\n                indices, image_adj = self.get_knn_adj_mat(self.image_embedding.weight.detach())\n                self.mm_adj = image_adj\n            if self.t_feat is not None:\n                indices, text_adj = self.get_knn_adj_mat(self.text_embedding.weight.detach())\n                self.mm_adj = text_adj\n            if self.v_feat is not None and self.t_feat is not None:\n                self.mm_adj = self.mm_image_weight * image_adj + (1.0 - self.mm_image_weight) * text_adj\n                del text_adj\n                del image_adj\n            torch.save(self.mm_adj, mm_adj_file)\n\n        # packing interaction in training into edge_index\n        train_interactions = dataset.inter_matrix(form='coo').astype(np.float32)\n        edge_index = self.pack_edge_index(train_interactions)\n        self.edge_index = torch.tensor(edge_index, dtype=torch.long).t().contiguous().to(self.device)\n        self.edge_index = torch.cat((self.edge_index, self.edge_index[[1, 0]]), dim=1)\n\n        # pdb.set_trace()\n        self.weight_u = nn.Parameter(nn.init.xavier_normal_(\n            torch.tensor(np.random.randn(self.num_user, 2, 1), 
dtype=torch.float32, requires_grad=True)))\n        self.weight_u.data = F.softmax(self.weight_u, dim=1)\n\n        self.weight_i = nn.Parameter(nn.init.xavier_normal_(\n            torch.tensor(np.random.randn(self.num_item, 2, 1), dtype=torch.float32, requires_grad=True)))\n        self.weight_i.data = F.softmax(self.weight_i, dim=1)\n\n        self.item_index = torch.zeros([self.num_item], dtype=torch.long)\n        index = []\n        for i in range(self.num_item):\n            self.item_index[i] = i\n            index.append(i)\n        self.drop_percent = self.drop_rate\n        self.single_percent = 1\n        self.double_percent = 0\n\n        drop_item = torch.tensor(\n            np.random.choice(self.item_index, int(self.num_item * self.drop_percent), replace=False))\n        drop_item_single = drop_item[:int(self.single_percent * len(drop_item))]\n\n        self.dropv_node_idx_single = drop_item_single[:int(len(drop_item_single) * 1 / 3)]\n        self.dropt_node_idx_single = drop_item_single[int(len(drop_item_single) * 2 / 3):]\n\n        self.dropv_node_idx = self.dropv_node_idx_single\n        self.dropt_node_idx = self.dropt_node_idx_single\n\n        mask_cnt = torch.zeros(self.num_item, dtype=int).tolist()\n        for edge in edge_index:\n            mask_cnt[edge[1] - self.num_user] += 1\n        mask_dropv = []\n        mask_dropt = []\n        for idx, num in enumerate(mask_cnt):\n            temp_false = [False] * num\n            temp_true = [True] * num\n            mask_dropv.extend(temp_false) if idx in self.dropv_node_idx else mask_dropv.extend(temp_true)\n            mask_dropt.extend(temp_false) if idx in self.dropt_node_idx else mask_dropt.extend(temp_true)\n\n        edge_index = edge_index[np.lexsort(edge_index.T[1, None])]\n        edge_index_dropv = edge_index[mask_dropv]\n        edge_index_dropt = edge_index[mask_dropt]\n\n        self.edge_index_dropv = torch.tensor(edge_index_dropv).t().contiguous().to(self.device)\n        
self.edge_index_dropt = torch.tensor(edge_index_dropt).t().contiguous().to(self.device)\n\n        self.edge_index_dropv = torch.cat((self.edge_index_dropv, self.edge_index_dropv[[1, 0]]), dim=1)\n        self.edge_index_dropt = torch.cat((self.edge_index_dropt, self.edge_index_dropt[[1, 0]]), dim=1)\n\n        self.MLP_user = nn.Linear(self.dim_latent * 2, self.dim_latent)\n\n        if self.v_feat is not None:\n            self.v_drop_ze = torch.zeros(len(self.dropv_node_idx), self.v_feat.size(1)).to(self.device)\n            self.v_gcn = GCN(self.dataset, batch_size, num_user, num_item, dim_x, self.aggr_mode,\n                             num_layer=self.num_layer, has_id=has_id, dropout=self.drop_rate, dim_latent=64,\n                             device=self.device, features=self.v_feat)  # 256)\n        if self.t_feat is not None:\n            self.t_drop_ze = torch.zeros(len(self.dropt_node_idx), self.t_feat.size(1)).to(self.device)\n            self.t_gcn = GCN(self.dataset, batch_size, num_user, num_item, dim_x, self.aggr_mode,\n                             num_layer=self.num_layer, has_id=has_id, dropout=self.drop_rate, dim_latent=64,\n                             device=self.device, features=self.t_feat)\n\n        self.user_graph = User_Graph_sample(num_user, 'add', self.dim_latent)\n\n        self.result_embed = nn.Parameter(\n            nn.init.xavier_normal_(torch.tensor(np.random.randn(num_user + num_item, dim_x)))).to(self.device)\n\n    def get_knn_adj_mat(self, mm_embeddings):\n        context_norm = mm_embeddings.div(torch.norm(mm_embeddings, p=2, dim=-1, keepdim=True))\n        sim = torch.mm(context_norm, context_norm.transpose(1, 0))\n        _, knn_ind = torch.topk(sim, self.knn_k, dim=-1)\n        adj_size = sim.size()\n        del sim\n        # construct sparse adj\n        indices0 = torch.arange(knn_ind.shape[0]).to(self.device)\n        indices0 = torch.unsqueeze(indices0, 1)\n        indices0 = indices0.expand(-1, self.knn_k)\n        
indices = torch.stack((torch.flatten(indices0), torch.flatten(knn_ind)), 0)\n        # norm\n        return indices, self.compute_normalized_laplacian(indices, adj_size)\n\n    def compute_normalized_laplacian(self, indices, adj_size):\n        adj = torch.sparse.FloatTensor(indices, torch.ones_like(indices[0]), adj_size)\n        row_sum = 1e-7 + torch.sparse.sum(adj, -1).to_dense()\n        r_inv_sqrt = torch.pow(row_sum, -0.5)\n        rows_inv_sqrt = r_inv_sqrt[indices[0]]\n        cols_inv_sqrt = r_inv_sqrt[indices[1]]\n        values = rows_inv_sqrt * cols_inv_sqrt\n        return torch.sparse.FloatTensor(indices, values, adj_size)\n\n    def pre_epoch_processing(self):\n        self.epoch_user_graph, self.user_weight_matrix = self.topk_sample(self.k)\n        self.user_weight_matrix = self.user_weight_matrix.to(self.device)\n\n    def pack_edge_index(self, inter_mat):\n        rows = inter_mat.row\n        cols = inter_mat.col + self.n_users\n        # ndarray([598918, 2]) for ml-imdb\n        return np.column_stack((rows, cols))\n\n    def forward(self, interaction):\n        user_nodes, pos_item_nodes, neg_item_nodes = interaction[0], interaction[1], interaction[2]\n        pos_item_nodes += self.n_users\n        neg_item_nodes += self.n_users\n        representation = None\n\n        if self.v_feat is not None:\n            self.v_rep, self.v_preference = self.v_gcn(self.edge_index_dropv, self.edge_index, self.v_feat)\n            representation = self.v_rep\n        if self.t_feat is not None:\n            self.t_rep, self.t_preference = self.t_gcn(self.edge_index_dropt, self.edge_index, self.t_feat)\n            if representation is None:\n                representation = self.t_rep\n            else:\n                if self.construction == 'cat':\n                    representation = torch.cat((self.v_rep, self.t_rep), dim=1)\n                else:\n                    representation += self.t_rep\n\n        if self.construction == 'weighted_sum':\n   
         if self.v_rep is not None:\n                self.v_rep = torch.unsqueeze(self.v_rep, 2)\n                user_rep = self.v_rep[:self.num_user]\n            if self.t_rep is not None:\n                self.t_rep = torch.unsqueeze(self.t_rep, 2)\n                user_rep = self.t_rep[:self.num_user]\n            if self.v_rep is not None and self.t_rep is not None:\n                user_rep = torch.matmul(torch.cat((self.v_rep[:self.num_user], self.t_rep[:self.num_user]), dim=2),\n                                        self.weight_u)\n            user_rep = torch.squeeze(user_rep)\n\n        if self.construction == 'weighted_max':\n            # pdb.set_trace()\n            self.v_rep = torch.unsqueeze(self.v_rep, 2)\n\n            self.t_rep = torch.unsqueeze(self.t_rep, 2)\n\n            user_rep = torch.cat((self.v_rep[:self.num_user], self.t_rep[:self.num_user]), dim=2)\n            user_rep = self.weight_u.transpose(1, 2) * user_rep\n            user_rep = torch.max(user_rep, dim=2).values\n        if self.construction == 'cat':\n            # pdb.set_trace()\n            if self.v_rep is not None:\n                user_rep = self.v_rep[:self.num_user]\n            if self.t_rep is not None:\n                user_rep = self.t_rep[:self.num_user]\n            if self.v_rep is not None and self.t_rep is not None:\n                self.v_rep = torch.unsqueeze(self.v_rep, 2)\n                self.t_rep = torch.unsqueeze(self.t_rep, 2)\n                user_rep = torch.cat((self.v_rep[:self.num_user], self.t_rep[:self.num_user]), dim=2)\n                user_rep = self.weight_u.transpose(1, 2) * user_rep\n\n                user_rep = torch.cat((user_rep[:, :, 0], user_rep[:, :, 1]), dim=1)\n\n        item_rep = representation[self.num_user:]\n\n        ############################################ multi-modal information aggregation\n        h = item_rep\n        for i in range(self.n_layers):\n            h = torch.sparse.mm(self.mm_adj, h)\n        h_u1 = 
self.user_graph(user_rep, self.epoch_user_graph, self.user_weight_matrix)\n        user_rep = user_rep + h_u1\n        item_rep = item_rep + h\n        self.result_embed = torch.cat((user_rep, item_rep), dim=0)\n        user_tensor = self.result_embed[user_nodes]\n        pos_item_tensor = self.result_embed[pos_item_nodes]\n        neg_item_tensor = self.result_embed[neg_item_nodes]\n        pos_scores = torch.sum(user_tensor * pos_item_tensor, dim=1)\n        neg_scores = torch.sum(user_tensor * neg_item_tensor, dim=1)\n        return pos_scores, neg_scores\n\n    def calculate_loss(self, interaction):\n        user = interaction[0]\n        pos_scores, neg_scores = self.forward(interaction)\n        loss_value = -torch.mean(torch.log2(torch.sigmoid(pos_scores - neg_scores)))\n        reg_embedding_loss_v = (self.v_preference[user] ** 2).mean() if self.v_preference is not None else 0.0\n        reg_embedding_loss_t = (self.t_preference[user] ** 2).mean() if self.t_preference is not None else 0.0\n\n        reg_loss = self.reg_weight * (reg_embedding_loss_v + reg_embedding_loss_t)\n        if self.construction == 'weighted_sum':\n            reg_loss += self.reg_weight * (self.weight_u ** 2).mean()\n            reg_loss += self.reg_weight * (self.weight_i ** 2).mean()\n        elif self.construction == 'cat':\n            reg_loss += self.reg_weight * (self.weight_u ** 2).mean()\n        elif self.construction == 'cat_mlp':\n            reg_loss += self.reg_weight * (self.MLP_user.weight ** 2).mean()\n        return loss_value + reg_loss\n\n    def full_sort_predict(self, interaction):\n        user_tensor = self.result_embed[:self.n_users]\n        item_tensor = self.result_embed[self.n_users:]\n\n        temp_user_tensor = user_tensor[interaction[0], :]\n        score_matrix = torch.matmul(temp_user_tensor, item_tensor.t())\n        return score_matrix\n\n    def topk_sample(self, k):\n        user_graph_index = []\n        count_num = 0\n        
user_weight_matrix = torch.zeros(len(self.user_graph_dict), k)\n        tasike = []\n        for i in range(k):\n            tasike.append(0)\n        for i in range(len(self.user_graph_dict)):\n            if len(self.user_graph_dict[i][0]) < k:\n                count_num += 1\n                if len(self.user_graph_dict[i][0]) == 0:\n                    # pdb.set_trace()\n                    user_graph_index.append(tasike)\n                    continue\n                user_graph_sample = self.user_graph_dict[i][0][:k]\n                user_graph_weight = self.user_graph_dict[i][1][:k]\n                while len(user_graph_sample) < k:\n                    rand_index = np.random.randint(0, len(user_graph_sample))\n                    user_graph_sample.append(user_graph_sample[rand_index])\n                    user_graph_weight.append(user_graph_weight[rand_index])\n                user_graph_index.append(user_graph_sample)\n\n                if self.user_aggr_mode == 'softmax':\n                    user_weight_matrix[i] = F.softmax(torch.tensor(user_graph_weight), dim=0)  # softmax\n                if self.user_aggr_mode == 'mean':\n                    user_weight_matrix[i] = torch.ones(k) / k  # mean\n                continue\n            user_graph_sample = self.user_graph_dict[i][0][:k]\n            user_graph_weight = self.user_graph_dict[i][1][:k]\n\n            if self.user_aggr_mode == 'softmax':\n                user_weight_matrix[i] = F.softmax(torch.tensor(user_graph_weight), dim=0)  # softmax\n            if self.user_aggr_mode == 'mean':\n                user_weight_matrix[i] = torch.ones(k) / k  # mean\n            user_graph_index.append(user_graph_sample)\n\n        # pdb.set_trace()\n        return user_graph_index, user_weight_matrix\n\n\nclass User_Graph_sample(torch.nn.Module):\n    def __init__(self, num_user, aggr_mode, dim_latent):\n        super(User_Graph_sample, self).__init__()\n        self.num_user = num_user\n        self.dim_latent = 
dim_latent\n        self.aggr_mode = aggr_mode\n\n    def forward(self, features, user_graph, user_matrix):\n        index = user_graph\n        u_features = features[index]\n        user_matrix = user_matrix.unsqueeze(1)\n        # pdb.set_trace()\n        u_pre = torch.matmul(user_matrix, u_features)\n        u_pre = u_pre.squeeze()\n        return u_pre\n\n\nclass GCN(torch.nn.Module):\n    def __init__(self, datasets, batch_size, num_user, num_item, dim_id, aggr_mode, num_layer, has_id, dropout,\n                 dim_latent=None, device=None, features=None):\n        super(GCN, self).__init__()\n        self.batch_size = batch_size\n        self.num_user = num_user\n        self.num_item = num_item\n        self.datasets = datasets\n        self.dim_id = dim_id\n        self.dim_feat = features.size(1)\n        self.dim_latent = dim_latent\n        self.aggr_mode = aggr_mode\n        self.num_layer = num_layer\n        self.has_id = has_id\n        self.dropout = dropout\n        self.device = device\n\n        if self.dim_latent:\n            self.preference = nn.Parameter(nn.init.xavier_normal_(torch.tensor(\n                np.random.randn(num_user, self.dim_latent), dtype=torch.float32, requires_grad=True),\n                gain=1).to(self.device))\n            self.MLP = nn.Linear(self.dim_feat, 4 * self.dim_latent)\n            self.MLP_1 = nn.Linear(4 * self.dim_latent, self.dim_latent)\n            self.conv_embed_1 = Base_gcn(self.dim_latent, self.dim_latent, aggr=self.aggr_mode)\n\n        else:\n            self.preference = nn.Parameter(nn.init.xavier_normal_(torch.tensor(\n                np.random.randn(num_user, self.dim_feat), dtype=torch.float32, requires_grad=True),\n                gain=1).to(self.device))\n            self.conv_embed_1 = Base_gcn(self.dim_latent, self.dim_latent, aggr=self.aggr_mode)\n\n    def forward(self, edge_index_drop, edge_index, features):\n        temp_features = self.MLP_1(F.leaky_relu(self.MLP(features))) if 
self.dim_latent else features\n        x = torch.cat((self.preference, temp_features), dim=0).to(self.device)\n        x = F.normalize(x).to(self.device)\n        h = self.conv_embed_1(x, edge_index)  # equation 1\n        h_1 = self.conv_embed_1(h, edge_index)\n\n        x_hat = h + x + h_1\n        return x_hat, self.preference\n\n\nclass Base_gcn(MessagePassing):\n    def __init__(self, in_channels, out_channels, normalize=True, bias=True, aggr='add', **kwargs):\n        super(Base_gcn, self).__init__(aggr=aggr, **kwargs)\n        self.aggr = aggr\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n    def forward(self, x, edge_index, size=None):\n        # pdb.set_trace()\n        if size is None:\n            edge_index, _ = remove_self_loops(edge_index)\n            # edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))\n        x = x.unsqueeze(-1) if x.dim() == 1 else x\n        # pdb.set_trace()\n        return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)\n\n    def message(self, x_j, edge_index, size):\n        if self.aggr == 'add':\n            # pdb.set_trace()\n            row, col = edge_index\n            deg = degree(row, size[0], dtype=x_j.dtype)\n            deg_inv_sqrt = deg.pow(-0.5)\n            norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]\n            return norm.view(-1, 1) * x_j\n        return x_j\n\n    def update(self, aggr_out):\n        return aggr_out\n\n    def __repr(self):\n        return '{}({},{})'.format(self.__class__.__name__, self.in_channels, self.out_channels)\n\n\n"
  },
  {
    "path": "src/models/dualgnn.py",
    "content": "# coding: utf-8\n# \n\"\"\"\nDualGNN: Dual Graph Neural Network for Multimedia Recommendation, IEEE Transactions on Multimedia 2021.\n\"\"\"\nimport os\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.utils import remove_self_loops, add_self_loops, degree\nimport torch_geometric\n\nfrom common.abstract_recommender import GeneralRecommender\nfrom common.loss import BPRLoss, EmbLoss\nfrom common.init import xavier_uniform_initialization\n\n\nclass DualGNN(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(DualGNN, self).__init__(config, dataset)\n\n        num_user = self.n_users\n        num_item = self.n_items\n        batch_size = config['train_batch_size']         # not used\n        dim_x = config['embedding_size']\n        has_id = True\n\n        self.batch_size = batch_size\n        self.num_user = num_user\n        self.num_item = num_item\n        self.k = 40\n        self.aggr_mode = config['aggr_mode']\n        self.user_aggr_mode = 'softmax'\n        self.num_layer = 1\n        self.cold_start = 0\n        self.dataset = dataset\n        self.construction = 'weighted_sum'\n        self.reg_weight = config['reg_weight']\n        self.drop_rate = 0.1\n        self.v_rep = None\n        self.t_rep = None\n        self.v_preference = None\n        self.t_preference = None\n        self.dim_latent = 64\n        self.dim_feat = 128\n        self.MLP_v = nn.Linear(self.dim_latent, self.dim_latent, bias=False)\n        self.MLP_t = nn.Linear(self.dim_latent, self.dim_latent, bias=False)\n\n        dataset_path = os.path.abspath(config['data_path'] + config['dataset'])\n        self.user_graph_dict = np.load(os.path.join(dataset_path, config['user_graph_dict_file']), allow_pickle=True).item()\n\n        # packing interaction in training into edge_index\n        train_interactions = 
dataset.inter_matrix(form='coo').astype(np.float32)\n        #edge_index = torch.tensor(self.pack_edge_index(train_interactions), dtype=torch.long)\n        edge_index = self.pack_edge_index(train_interactions)\n        self.edge_index = torch.tensor(edge_index, dtype=torch.long).t().contiguous().to(self.device)\n        self.edge_index = torch.cat((self.edge_index, self.edge_index[[1, 0]]), dim=1)\n\n        # pdb.set_trace()\n        self.weight_u = nn.Parameter(nn.init.xavier_normal_(\n            torch.tensor(np.random.randn(self.num_user, 2, 1), dtype=torch.float32, requires_grad=True)))\n        self.weight_u.data = F.softmax(self.weight_u.data, dim=1)\n\n        self.weight_i = nn.Parameter(nn.init.xavier_normal_(\n            torch.tensor(np.random.randn(self.num_item, 2, 1), dtype=torch.float32, requires_grad=True)))\n        self.weight_i.data = F.softmax(self.weight_i.data, dim=1)\n\n        self.item_index = torch.zeros([self.num_item], dtype=torch.long)\n        index = []\n        for i in range(self.num_item):\n            self.item_index[i] = i\n            index.append(i)\n        self.drop_percent = self.drop_rate\n        self.single_percent = 1\n        self.double_percent = 0\n        # pdb.set_trace()\n        drop_item = torch.tensor(\n            np.random.choice(self.item_index, int(self.num_item * self.drop_percent), replace=False))\n        drop_item_single = drop_item[:int(self.single_percent * len(drop_item))]\n\n        # random.shuffle(index)\n        # self.item_index = self.item_index[index]\n        self.dropv_node_idx_single = drop_item_single[:int(len(drop_item_single) * 1 / 3)]\n        self.dropt_node_idx_single = drop_item_single[int(len(drop_item_single) * 2 / 3):]\n\n        self.dropv_node_idx = self.dropv_node_idx_single\n        self.dropt_node_idx = self.dropt_node_idx_single\n        # pdb.set_trace()\n        mask_cnt = torch.zeros(self.num_item, dtype=int).tolist()\n        for edge in edge_index:\n            
mask_cnt[edge[1] - self.num_user] += 1\n        mask_dropv = []\n        mask_dropt = []\n        for idx, num in enumerate(mask_cnt):\n            temp_false = [False] * num\n            temp_true = [True] * num\n            mask_dropv.extend(temp_false) if idx in self.dropv_node_idx else mask_dropv.extend(temp_true)\n            mask_dropt.extend(temp_false) if idx in self.dropt_node_idx else mask_dropt.extend(temp_true)\n\n        edge_index = edge_index[np.lexsort(edge_index.T[1, None])]\n        edge_index_dropv = edge_index[mask_dropv]\n        edge_index_dropt = edge_index[mask_dropt]\n\n        self.edge_index_dropv = torch.tensor(edge_index_dropv).t().contiguous().to(self.device)\n        self.edge_index_dropt = torch.tensor(edge_index_dropt).t().contiguous().to(self.device)\n\n        self.edge_index_dropv = torch.cat((self.edge_index_dropv, self.edge_index_dropv[[1, 0]]), dim=1)\n        self.edge_index_dropt = torch.cat((self.edge_index_dropt, self.edge_index_dropt[[1, 0]]), dim=1)\n\n        self.MLP_user = nn.Linear(self.dim_latent * 3, self.dim_latent)\n        #self.v_feat = torch.tensor(v_feat, dtype=torch.float).to(self.device)\n        #self.t_feat = torch.tensor(t_feat, dtype=torch.float).to(self.device)\n        if self.v_feat is not None:\n            self.v_drop_ze = torch.zeros(len(self.dropv_node_idx), self.v_feat.size(1)).to(self.device)\n            self.v_gcn = GCN(self.dataset, batch_size, num_user, num_item, dim_x, self.aggr_mode,\n                         num_layer=self.num_layer, has_id=has_id, dropout=self.drop_rate, dim_latent=64,\n                         device=self.device, features=self.v_feat)  # 256)\n        if self.t_feat is not None:\n            self.t_drop_ze = torch.zeros(len(self.dropt_node_idx), self.t_feat.size(1)).to(self.device)\n            self.t_gcn = GCN(self.dataset, batch_size, num_user, num_item, dim_x, self.aggr_mode,\n                         num_layer=self.num_layer, has_id=has_id, dropout=self.drop_rate, 
dim_latent=64,\n                         device=self.device, features=self.t_feat)\n\n        self.user_graph = User_Graph_sample(num_user, 'add', self.dim_latent)\n\n        self.result_embed = nn.Parameter(nn.init.xavier_normal_(torch.tensor(np.random.randn(num_user + num_item, dim_x)))).to(self.device)\n\n    def pre_epoch_processing(self):\n        self.epoch_user_graph, self.user_weight_matrix = self.topk_sample(self.k)\n        self.user_weight_matrix = self.user_weight_matrix.to(self.device)\n\n    def pack_edge_index(self, inter_mat):\n        rows = inter_mat.row\n        cols = inter_mat.col + self.n_users\n        # ndarray([598918, 2]) for ml-imdb\n        return np.column_stack((rows, cols))\n\n    def forward(self, interaction):\n        user_nodes, pos_item_nodes, neg_item_nodes = interaction[0], interaction[1], interaction[2]\n        pos_item_nodes += self.n_users\n        neg_item_nodes += self.n_users\n        representation = None\n        if self.v_feat is not None:\n            self.v_rep, self.v_preference = self.v_gcn(self.edge_index_dropv, self.edge_index, self.v_feat)\n            representation = self.v_rep\n        if self.t_feat is not None:\n            self.t_rep, self.t_preference = self.t_gcn(self.edge_index_dropt, self.edge_index, self.t_feat)\n            if representation is None:\n                representation = self.t_rep\n            else:\n                representation += self.t_rep\n        # representation = self.v_rep+self.a_rep+self.t_rep\n\n        # pdb.set_trace()\n        if self.construction == 'weighted_sum':\n            if self.v_rep is not None:\n                self.v_rep = torch.unsqueeze(self.v_rep, 2)\n                user_rep = self.v_rep[:self.num_user]\n            if self.t_rep is not None:\n                self.t_rep = torch.unsqueeze(self.t_rep, 2)\n                user_rep = self.t_rep[:self.num_user]\n            if self.v_rep is not None and self.t_rep is not None:\n                user_rep = 
torch.matmul(torch.cat((self.v_rep[:self.num_user], self.t_rep[:self.num_user]), dim=2),\n                                        self.weight_u)\n            user_rep = torch.squeeze(user_rep)\n\n        item_rep = representation[self.num_user:]\n        ############################################ multi-modal information aggregation\n        h_u1 = self.user_graph(user_rep, self.epoch_user_graph, self.user_weight_matrix)\n        user_rep = user_rep + h_u1\n        self.result_embed = torch.cat((user_rep, item_rep), dim=0)\n        user_tensor = self.result_embed[user_nodes]\n        pos_item_tensor = self.result_embed[pos_item_nodes]\n        neg_item_tensor = self.result_embed[neg_item_nodes]\n        pos_scores = torch.sum(user_tensor * pos_item_tensor, dim=1)\n        neg_scores = torch.sum(user_tensor * neg_item_tensor, dim=1)\n        return pos_scores, neg_scores\n\n    def calculate_loss(self, interaction):\n        user = interaction[0]\n        pos_scores, neg_scores = self.forward(interaction)\n        loss_value = -torch.mean(torch.log2(torch.sigmoid(pos_scores - neg_scores)))\n        reg_embedding_loss_v = (self.v_preference[user] ** 2).mean() if self.v_preference is not None else 0.0\n        # reg_embedding_loss_a = (self.a_preference[user.to(self.device)] ** 2).mean()\n        reg_embedding_loss_t = (self.t_preference[user] ** 2).mean() if self.t_preference is not None else 0.0\n\n        reg_loss = self.reg_weight * (reg_embedding_loss_v + reg_embedding_loss_t)\n        # reg_loss = self.reg_weight * (reg_embedding_loss_v+reg_embedding_loss_a+reg_embedding_loss_t)\n        if self.construction == 'weighted_sum':\n            reg_loss += self.reg_weight * (self.weight_u ** 2).mean()\n            reg_loss += self.reg_weight * (self.weight_i ** 2).mean()\n        elif self.construction == 'cat_mlp':\n            reg_loss += self.reg_weight * (self.MLP_user.weight ** 2).mean()\n        return loss_value + reg_loss\n\n    def full_sort_predict(self, 
interaction):\n        user_tensor = self.result_embed[:self.n_users]\n        item_tensor = self.result_embed[self.n_users:]\n\n        temp_user_tensor = user_tensor[interaction[0], :]\n        score_matrix = torch.matmul(temp_user_tensor, item_tensor.t())\n        return score_matrix\n\n    def topk_sample(self, k):\n        user_graph_index = []\n        count_num = 0\n        user_weight_matrix = torch.zeros(len(self.user_graph_dict), k)\n        tasike = []\n        for i in range(k):\n            tasike.append(0)\n        for i in range(len(self.user_graph_dict)):\n            if len(self.user_graph_dict[i][0]) < k:\n                count_num += 1\n                if len(self.user_graph_dict[i][0]) == 0:\n                    # pdb.set_trace()\n                    user_graph_index.append(tasike)\n                    continue\n                user_graph_sample = self.user_graph_dict[i][0][:k]\n                user_graph_weight = self.user_graph_dict[i][1][:k]\n                while len(user_graph_sample) < k:\n                    # pdb.set_trace()\n                    rand_index = np.random.randint(0, len(user_graph_sample))\n                    user_graph_sample.append(user_graph_sample[rand_index])\n                    user_graph_weight.append(user_graph_weight[rand_index])\n                user_graph_index.append(user_graph_sample)\n\n                # user_weight_matrix[i] = torch.tensor(user_graph_weight) / sum(user_graph_weight) #weighted\n                if self.user_aggr_mode == 'softmax':\n                    user_weight_matrix[i] = F.softmax(torch.tensor(user_graph_weight), dim=0)  # softmax\n                if self.user_aggr_mode == 'mean':\n                    user_weight_matrix[i] = torch.ones(k) / k  # mean\n                # pdb.set_trace()\n                continue\n            user_graph_sample = self.user_graph_dict[i][0][:k]\n            user_graph_weight = self.user_graph_dict[i][1][:k]\n\n            # user_weight_matrix[i] = 
torch.tensor(user_graph_weight) / sum(user_graph_weight) #weighted\n            if self.user_aggr_mode == 'softmax':\n                user_weight_matrix[i] = F.softmax(torch.tensor(user_graph_weight), dim=0)  # softmax\n            if self.user_aggr_mode == 'mean':\n                # pdb.set_trace()\n                user_weight_matrix[i] = torch.ones(k) / k  # mean\n            # user_weight_list.append(user_weight)\n            user_graph_index.append(user_graph_sample)\n\n        # pdb.set_trace()\n        return user_graph_index, user_weight_matrix\n\nclass User_Graph_sample(torch.nn.Module):\n    def __init__(self, num_user, aggr_mode,dim_latent):\n        super(User_Graph_sample, self).__init__()\n        self.num_user = num_user\n        self.dim_latent = dim_latent\n        self.aggr_mode = aggr_mode\n\n    def forward(self, features,user_graph,user_matrix):\n        index = user_graph\n        u_features = features[index]\n        user_matrix = user_matrix.unsqueeze(1)\n        # pdb.set_trace()\n        u_pre = torch.matmul(user_matrix,u_features)\n        u_pre = u_pre.squeeze()\n        return u_pre\n\n\nclass GCN(torch.nn.Module):\n    def __init__(self,datasets, batch_size, num_user, num_item, dim_id, aggr_mode, num_layer, has_id, dropout,\n                 dim_latent=None,device = None,features=None):\n        super(GCN, self).__init__()\n        self.batch_size = batch_size\n        self.num_user = num_user\n        self.num_item = num_item\n        self.datasets = datasets\n        self.dim_id = dim_id\n        # if self.datasets =='tiktok' or self.datasets =='tiktok_new' or self.datasets == 'cold_tiktok':\n        #      self.dim_feat = 128\n        # elif self.datasets == 'Movielens' or self.datasets == 'cold_movie' or self.datasets == 'ml-imdb-npy':\n        #      self.dim_feat = features.size(1)\n        self.dim_feat = features.size(1)\n        self.dim_latent = dim_latent\n        self.aggr_mode = aggr_mode\n        self.num_layer = 
num_layer\n        self.has_id = has_id\n        self.dropout = dropout\n        self.device = device\n\n        if self.dim_latent:\n            self.preference = nn.Parameter(nn.init.xavier_normal_(torch.tensor(\n                np.random.randn(num_user, self.dim_latent), dtype=torch.float32, requires_grad=True),\n                gain=1).to(self.device))\n            self.MLP = nn.Linear(self.dim_feat, 4*self.dim_latent)\n            self.MLP_1 = nn.Linear(4*self.dim_latent, self.dim_latent)\n            self.conv_embed_1 = Base_gcn(self.dim_latent, self.dim_latent, aggr=self.aggr_mode)\n\n        else:\n            self.preference = nn.Parameter(nn.init.xavier_normal_(torch.tensor(\n                np.random.randn(num_user, self.dim_feat), dtype=torch.float32, requires_grad=True),\n                gain=1).to(self.device))\n            self.conv_embed_1 = Base_gcn(self.dim_latent, self.dim_latent, aggr=self.aggr_mode)\n\n    def forward(self, edge_index_drop,edge_index,features):\n        # pdb.set_trace()\n        temp_features = self.MLP_1(F.leaky_relu(self.MLP(features))) if self.dim_latent else features\n        # temp_features = F.normalize(temp_features)\n        x = torch.cat((self.preference, temp_features), dim=0).to(self.device)\n        x = F.normalize(x).to(self.device)\n        # pdb.set_trace()\n        h = self.conv_embed_1(x, edge_index)  # equation 1\n        h_1 = self.conv_embed_1(h, edge_index)\n\n        x_hat =h + x +h_1\n        return x_hat, self.preference\n\n\nclass Base_gcn(MessagePassing):\n    def __init__(self, in_channels, out_channels, normalize=True, bias=True, aggr='add', **kwargs):\n        super(Base_gcn, self).__init__(aggr=aggr, **kwargs)\n        self.aggr = aggr\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n    def forward(self, x, edge_index, size=None):\n        # pdb.set_trace()\n        if size is None:\n            edge_index, _ = remove_self_loops(edge_index)\n            # 
edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))\n        x = x.unsqueeze(-1) if x.dim() == 1 else x\n        # pdb.set_trace()\n        return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)\n\n    def message(self, x_j, edge_index, size):\n        if self.aggr == 'add':\n            # pdb.set_trace()\n            row, col = edge_index\n            deg = degree(row, size[0], dtype=x_j.dtype)\n            deg_inv_sqrt = deg.pow(-0.5)\n            norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]\n            return norm.view(-1, 1) * x_j\n        return x_j\n\n    def update(self, aggr_out):\n        return aggr_out\n\n    def __repr(self):\n        return '{}({},{})'.format(self.__class__.__name__, self.in_channels, self.out_channels)\n\n\n"
  },
  {
    "path": "src/models/freedom.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\nr\"\"\"\nFREEDOM: A Tale of Two Graphs: Freezing and Denoising Graph Structures for Multimodal Recommendation\n# Update: 01/08/2022\n\"\"\"\n\n\nimport os\nimport random\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom common.abstract_recommender import GeneralRecommender\nfrom common.loss import BPRLoss, EmbLoss, L2Loss\nfrom utils.utils import build_sim, compute_normalized_laplacian\n\n\nclass FREEDOM(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(FREEDOM, self).__init__(config, dataset)\n\n        self.embedding_dim = config['embedding_size']\n        self.feat_embed_dim = config['feat_embed_dim']\n        self.knn_k = config['knn_k']\n        self.lambda_coeff = config['lambda_coeff']\n        self.cf_model = config['cf_model']\n        self.n_layers = config['n_mm_layers']\n        self.n_ui_layers = config['n_ui_layers']\n        self.reg_weight = config['reg_weight']\n        self.build_item_graph = True\n        self.mm_image_weight = config['mm_image_weight']\n        self.dropout = config['dropout']\n        self.degree_ratio = config['degree_ratio']\n\n        self.n_nodes = self.n_users + self.n_items\n\n        # load dataset info\n        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32)\n        self.norm_adj = self.get_norm_adj_mat().to(self.device)\n        self.masked_adj, self.mm_adj = None, None\n        self.edge_indices, self.edge_values = self.get_edge_info()\n        self.edge_indices, self.edge_values = self.edge_indices.to(self.device), self.edge_values.to(self.device)\n        self.edge_full_indices = torch.arange(self.edge_values.size(0)).to(self.device)\n\n        self.user_embedding = nn.Embedding(self.n_users, self.embedding_dim)\n        self.item_id_embedding = nn.Embedding(self.n_items, self.embedding_dim)\n        
nn.init.xavier_uniform_(self.user_embedding.weight)\n        nn.init.xavier_uniform_(self.item_id_embedding.weight)\n\n        dataset_path = os.path.abspath(config['data_path'] + config['dataset'])\n        mm_adj_file = os.path.join(dataset_path, 'mm_adj_freedomdsp_{}_{}.pt'.format(self.knn_k, int(10*self.mm_image_weight)))\n\n        if self.v_feat is not None:\n            self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=False)\n            self.image_trs = nn.Linear(self.v_feat.shape[1], self.feat_embed_dim)\n        if self.t_feat is not None:\n            self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=False)\n            self.text_trs = nn.Linear(self.t_feat.shape[1], self.feat_embed_dim)\n\n        if os.path.exists(mm_adj_file):\n            self.mm_adj = torch.load(mm_adj_file)\n        else:\n            if self.v_feat is not None:\n                indices, image_adj = self.get_knn_adj_mat(self.image_embedding.weight.detach())\n                self.mm_adj = image_adj\n            if self.t_feat is not None:\n                indices, text_adj = self.get_knn_adj_mat(self.text_embedding.weight.detach())\n                self.mm_adj = text_adj\n            if self.v_feat is not None and self.t_feat is not None:\n                self.mm_adj = self.mm_image_weight * image_adj + (1.0 - self.mm_image_weight) * text_adj\n                del text_adj\n                del image_adj\n            torch.save(self.mm_adj, mm_adj_file)\n\n    def get_knn_adj_mat(self, mm_embeddings):\n        context_norm = mm_embeddings.div(torch.norm(mm_embeddings, p=2, dim=-1, keepdim=True))\n        sim = torch.mm(context_norm, context_norm.transpose(1, 0))\n        _, knn_ind = torch.topk(sim, self.knn_k, dim=-1)\n        adj_size = sim.size()\n        del sim\n        # construct sparse adj\n        indices0 = torch.arange(knn_ind.shape[0]).to(self.device)\n        indices0 = torch.unsqueeze(indices0, 1)\n        indices0 = 
indices0.expand(-1, self.knn_k)\n        indices = torch.stack((torch.flatten(indices0), torch.flatten(knn_ind)), 0)\n        # norm\n        return indices, self.compute_normalized_laplacian(indices, adj_size)\n\n    def compute_normalized_laplacian(self, indices, adj_size):\n        adj = torch.sparse.FloatTensor(indices, torch.ones_like(indices[0]), adj_size)\n        row_sum = 1e-7 + torch.sparse.sum(adj, -1).to_dense()\n        r_inv_sqrt = torch.pow(row_sum, -0.5)\n        rows_inv_sqrt = r_inv_sqrt[indices[0]]\n        cols_inv_sqrt = r_inv_sqrt[indices[1]]\n        values = rows_inv_sqrt * cols_inv_sqrt\n        return torch.sparse.FloatTensor(indices, values, adj_size)\n\n    def get_norm_adj_mat(self):\n        A = sp.dok_matrix((self.n_users + self.n_items,\n                           self.n_users + self.n_items), dtype=np.float32)\n        inter_M = self.interaction_matrix\n        inter_M_t = self.interaction_matrix.transpose()\n        data_dict = dict(zip(zip(inter_M.row, inter_M.col + self.n_users),\n                             [1] * inter_M.nnz))\n        data_dict.update(dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col),\n                                  [1] * inter_M_t.nnz)))\n        A._update(data_dict)\n        # norm adj matrix\n        sumArr = (A > 0).sum(axis=1)\n        # add epsilon to avoid Devide by zero Warning\n        diag = np.array(sumArr.flatten())[0] + 1e-7\n        diag = np.power(diag, -0.5)\n        D = sp.diags(diag)\n        L = D * A * D\n        # covert norm_adj matrix to tensor\n        L = sp.coo_matrix(L)\n        row = L.row\n        col = L.col\n        i = torch.LongTensor(np.array([row, col]))\n        data = torch.FloatTensor(L.data)\n\n        return torch.sparse.FloatTensor(i, data, torch.Size((self.n_nodes, self.n_nodes)))\n\n    def pre_epoch_processing(self):\n        if self.dropout <= .0:\n            self.masked_adj = self.norm_adj\n            return\n        # degree-sensitive edge pruning\n   
     degree_len = int(self.edge_values.size(0) * (1. - self.dropout))\n        degree_idx = torch.multinomial(self.edge_values, degree_len)\n        # random sample\n        keep_indices = self.edge_indices[:, degree_idx]\n        # norm values\n        keep_values = self._normalize_adj_m(keep_indices, torch.Size((self.n_users, self.n_items)))\n        all_values = torch.cat((keep_values, keep_values))\n        # update keep_indices to users/items+self.n_users\n        keep_indices[1] += self.n_users\n        all_indices = torch.cat((keep_indices, torch.flip(keep_indices, [0])), 1)\n        self.masked_adj = torch.sparse.FloatTensor(all_indices, all_values, self.norm_adj.shape).to(self.device)\n\n    def _normalize_adj_m(self, indices, adj_size):\n        adj = torch.sparse.FloatTensor(indices, torch.ones_like(indices[0]), adj_size)\n        row_sum = 1e-7 + torch.sparse.sum(adj, -1).to_dense()\n        col_sum = 1e-7 + torch.sparse.sum(adj.t(), -1).to_dense()\n        r_inv_sqrt = torch.pow(row_sum, -0.5)\n        rows_inv_sqrt = r_inv_sqrt[indices[0]]\n        c_inv_sqrt = torch.pow(col_sum, -0.5)\n        cols_inv_sqrt = c_inv_sqrt[indices[1]]\n        values = rows_inv_sqrt * cols_inv_sqrt\n        return values\n\n    def get_edge_info(self):\n        rows = torch.from_numpy(self.interaction_matrix.row)\n        cols = torch.from_numpy(self.interaction_matrix.col)\n        edges = torch.stack([rows, cols]).type(torch.LongTensor)\n        # edge normalized values\n        values = self._normalize_adj_m(edges, torch.Size((self.n_users, self.n_items)))\n        return edges, values\n\n    def forward(self, adj):\n        h = self.item_id_embedding.weight\n        for i in range(self.n_layers):\n            h = torch.sparse.mm(self.mm_adj, h)\n\n        ego_embeddings = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0)\n        all_embeddings = [ego_embeddings]\n        for i in range(self.n_ui_layers):\n            side_embeddings = 
torch.sparse.mm(adj, ego_embeddings)\n            ego_embeddings = side_embeddings\n            all_embeddings += [ego_embeddings]\n        all_embeddings = torch.stack(all_embeddings, dim=1)\n        all_embeddings = all_embeddings.mean(dim=1, keepdim=False)\n        u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0)\n        return u_g_embeddings, i_g_embeddings + h\n\n    def bpr_loss(self, users, pos_items, neg_items):\n        pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)\n        neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)\n\n        maxi = F.logsigmoid(pos_scores - neg_scores)\n        mf_loss = -torch.mean(maxi)\n\n        return mf_loss\n\n    def calculate_loss(self, interaction):\n        users = interaction[0]\n        pos_items = interaction[1]\n        neg_items = interaction[2]\n\n        ua_embeddings, ia_embeddings = self.forward(self.masked_adj)\n        self.build_item_graph = False\n\n        u_g_embeddings = ua_embeddings[users]\n        pos_i_g_embeddings = ia_embeddings[pos_items]\n        neg_i_g_embeddings = ia_embeddings[neg_items]\n\n        batch_mf_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings,\n                                                                      neg_i_g_embeddings)\n        mf_v_loss, mf_t_loss = 0.0, 0.0\n        if self.t_feat is not None:\n            text_feats = self.text_trs(self.text_embedding.weight)\n            mf_t_loss = self.bpr_loss(ua_embeddings[users], text_feats[pos_items], text_feats[neg_items])\n        if self.v_feat is not None:\n            image_feats = self.image_trs(self.image_embedding.weight)\n            mf_v_loss = self.bpr_loss(ua_embeddings[users], image_feats[pos_items], image_feats[neg_items])\n        return batch_mf_loss + self.reg_weight * (mf_t_loss + mf_v_loss)\n\n    def full_sort_predict(self, interaction):\n        user = interaction[0]\n\n        restore_user_e, restore_item_e = 
self.forward(self.norm_adj)\n        u_embeddings = restore_user_e[user]\n\n        # dot with all item embedding to accelerate\n        scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1))\n        return scores\n\n"
  },
  {
    "path": "src/models/grcn.py",
    "content": "# coding: utf-8\n# \n\"\"\"\nGraph-Refined Convolutional Network for Multimedia Recommendation with Implicit Feedback, MM 2020\n\"\"\"\nimport math\nimport time\nfrom tqdm import tqdm\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import Parameter\nimport torch.nn.functional as F\n#from SAGEConv import SAGEConv\n#from GATConv import GATConv\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.utils import add_self_loops, dropout_adj\nfrom torch_geometric.utils import remove_self_loops, add_self_loops, softmax\n\nfrom common.abstract_recommender import GeneralRecommender\nfrom common.loss import BPRLoss, EmbLoss\nfrom common.init import xavier_uniform_initialization\n# from torch.utils.checkpoint import checkpoint\n##########################################################################\n\nclass SAGEConv(MessagePassing):\n    def __init__(self, in_channels, out_channels, normalize=True, bias=True, aggr='mean', **kwargs):\n        super(SAGEConv, self).__init__(aggr=aggr, **kwargs)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n    def forward(self, x, edge_index, weight_vector, size=None):\n        self.weight_vector = weight_vector\n        return self.propagate(edge_index, size=size, x=x)\n\n    def message(self, x_j):\n        return x_j * self.weight_vector\n\n    def update(self, aggr_out):\n        return aggr_out\n\n    def __repr__(self):\n        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,\n                                   self.out_channels)\n\nclass GATConv(MessagePassing):\n    def __init__(self, in_channels, out_channels, self_loops=False):\n        super(GATConv, self).__init__(aggr='add')#, **kwargs)\n        self.self_loops = self_loops\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n    def forward(self, x, edge_index, size=None):\n        edge_index, _ = remove_self_loops(edge_index)\n 
       if self.self_loops:\n            edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))\n\n        return self.propagate(edge_index, size=size, x=x)\n\n\n    def message(self,  x_i, x_j, size_i ,edge_index_i):\n        #print(edge_index_i, x_i, x_j)\n        self.alpha = torch.mul(x_i, x_j).sum(dim=-1)\n        #print(self.alpha)\n        #print(edge_index_i,size_i)\n        # alpha = F.tanh(alpha)\n        # self.alpha = F.leaky_relu(self.alpha)\n        # alpha = torch.sigmoid(alpha)\n        self.alpha = softmax(self.alpha, edge_index_i, num_nodes=size_i)\n        # Sample attention coefficients stochastically.\n        # alpha = F.dropout(alpha, p=self.dropout, training=self.training)\n        return x_j*self.alpha.view(-1,1)\n        # return x_j * alpha.view(-1, self.heads, 1)\n\n    def update(self, aggr_out):\n        return aggr_out\n\n\n\nclass EGCN(torch.nn.Module):\n    def __init__(self, num_user, num_item, dim_E, aggr_mode, has_act, has_norm):\n        super(EGCN, self).__init__()\n        self.num_user = num_user\n        self.num_item = num_item\n        self.dim_E = dim_E\n        self.aggr_mode = aggr_mode\n        self.has_act = has_act\n        self.has_norm = has_norm\n        self.id_embedding = nn.Parameter( nn.init.xavier_normal_(torch.rand((num_user+num_item, dim_E))))\n        self.conv_embed_1 = SAGEConv(dim_E, dim_E, aggr=aggr_mode)         \n        self.conv_embed_2 = SAGEConv(dim_E, dim_E, aggr=aggr_mode)\n\n    def forward(self, edge_index, weight_vector):\n        x = self.id_embedding\n        edge_index = torch.cat((edge_index, edge_index[[1,0]]), dim=1)\n\n        if self.has_norm:\n            x = F.normalize(x) \n\n        x_hat_1 = self.conv_embed_1(x, edge_index, weight_vector) \n\n        if self.has_act:\n            x_hat_1 = F.leaky_relu_(x_hat_1)\n\n        x_hat_2 = self.conv_embed_2(x_hat_1, edge_index, weight_vector)\n        if self.has_act:\n            x_hat_2 = F.leaky_relu_(x_hat_2)\n\n        
return x + x_hat_1 + x_hat_2\n\n\nclass CGCN(torch.nn.Module):\n    def __init__(self, features, num_user, num_item, dim_C, aggr_mode, num_routing, has_act, has_norm, is_word=False):\n        super(CGCN, self).__init__()\n        self.num_user = num_user\n        self.num_item = num_item\n        self.aggr_mode = aggr_mode\n        self.num_routing = num_routing\n        self.has_act = has_act\n        self.has_norm = has_norm\n        self.dim_C = dim_C\n        self.preference = nn.Parameter(nn.init.xavier_normal_(torch.rand((num_user, dim_C))))\n        self.conv_embed_1 = GATConv(self.dim_C, self.dim_C)\n        self.is_word = is_word\n\n        if is_word:\n            self.word_tensor = torch.LongTensor(features).cuda()\n            self.features = nn.Embedding(torch.max(features[1])+1, dim_C)\n            nn.init.xavier_normal_(self.features.weight)\n\n        else:\n            self.dim_feat = features.size(1)\n            self.features = features\n            self.MLP = nn.Linear(self.dim_feat, self.dim_C)\n            #print('MLP weight',self.MLP.weight)\n            nn.init.xavier_normal_(self.MLP.weight)\n            #print(self.MLP.weight)\n\n    def forward(self, edge_index):\n        #print(self.features)\n        features = F.leaky_relu(self.MLP(self.features))\n        #print('features',features)\n        \n        if self.has_norm:\n            preference = F.normalize(self.preference)\n            features = F.normalize(features)\n            #print(preference,features)\n\n        for i in range(self.num_routing):\n            x = torch.cat((preference, features), dim=0)\n            #print(x,edge_index)\n            x_hat_1 = self.conv_embed_1(x, edge_index) \n            preference = preference + x_hat_1[:self.num_user]\n\n            if self.has_norm:\n                preference = F.normalize(preference)\n\n        x = torch.cat((preference, features), dim=0)\n        edge_index = torch.cat((edge_index, edge_index[[1,0]]), dim=1)\n\n        
x_hat_1 = self.conv_embed_1(x, edge_index) \n\n        if self.has_act:\n            x_hat_1 = F.leaky_relu_(x_hat_1)\n\n        return x + x_hat_1, self.conv_embed_1.alpha.view(-1, 1)\n\n\nclass GRCN(GeneralRecommender):\n    def __init__(self,  config, dataset):\n        super(GRCN, self).__init__(config, dataset)\n        self.num_user = self.n_users\n        self.num_item = self.n_items\n        num_user = self.n_users\n        num_item = self.n_items\n        dim_x = config['embedding_size']\n        dim_C = config['latent_embedding']\n        num_layer = config['n_layers']\n        batch_size = config['train_batch_size']         # not used\n        self.aggr_mode = 'add'\n        self.weight_mode = 'confid'\n        self.fusion_mode = 'concat'\n        has_id = True\n        has_act= False\n        has_norm= True\n        is_word = False\n        self.weight = torch.tensor([[1.0], [-1.0]]).to(self.device)\n        self.reg_weight = config['reg_weight']\n        self.dropout = 0\n        # packing interaction in training into edge_index\n        train_interactions = dataset.inter_matrix(form='coo').astype(np.float32)\n        edge_index = torch.tensor(self.pack_edge_index(train_interactions), dtype=torch.long)\n        self.edge_index = edge_index.t().contiguous().to(self.device)\n        #self.edge_index = torch.cat((self.edge_index, self.edge_index[[1, 0]]), dim=1)\n        self.num_modal = 0\n        self.id_gcn = EGCN(num_user, num_item, dim_x, self.aggr_mode, has_act, has_norm)\n        self.pruning = True\n\n        num_model = 0\n        if self.v_feat is not None:\n            self.v_gcn = CGCN(self.v_feat, num_user, num_item, dim_C, self.aggr_mode, num_layer, has_act, has_norm)\n            num_model += 1\n\n        #if a_feat is not None:\n            #self.a_gcn = CGCN(self.a_feat, num_user, num_item, dim_C, aggr_mode, num_layer, has_act, has_norm)\n            #num_model += 1\n        \n        if self.t_feat is not None:\n            self.t_gcn = 
CGCN(self.t_feat, num_user, num_item, dim_C, self.aggr_mode, num_layer, has_act, has_norm, is_word)\n            num_model += 1\n\n        self.model_specific_conf = nn.Parameter(nn.init.xavier_normal_(torch.rand((num_user+num_item, num_model))))\n\n        self.result = nn.init.xavier_normal_(torch.rand((num_user+num_item, dim_x))).to(self.device)\n        \n        \n    def pack_edge_index(self, inter_mat):\n        rows = inter_mat.row\n        cols = inter_mat.col + self.n_users\n        # ndarray([598918, 2]) for ml-imdb\n        return np.column_stack((rows, cols))\n\n\n    def forward(self):\n        weight = None\n        content_rep = None\n        num_modal = 0\n        edge_index, _ = dropout_adj(self.edge_index, p=self.dropout)\n        #print('edge_index: ', edge_index)\n\n        if self.v_feat is not None:\n            num_modal += 1\n            v_rep, weight_v = self.v_gcn(edge_index)\n            weight = weight_v\n            content_rep = v_rep\n            #print('weight_v is: ', weight)\n            #print('content_rep: ',content_rep)\n\n        #if self.a_feat is not None:\n            #num_modal += 1\n            #a_rep, weight_a = self.a_gcn(edge_index)\n            #if weight is  None:\n                #weight = weight_a  \n                #content_rep = a_rep\n            #else:\n                #content_rep = torch.cat((content_rep,a_rep),dim=1)\n                #if self.weight_mode == 'mean':\n                    #weight = weight+ weight_a\n                #else:\n                    #weight = torch.cat((weight, weight_a), dim=1)\n\n        if self.t_feat is not None:\n            num_modal += 1\n            t_rep, weight_t = self.t_gcn(edge_index)\n            if weight is None:\n                weight = weight_t   \n                conetent_rep = t_rep\n            else:\n                content_rep = torch.cat((content_rep,t_rep),dim=1)\n                if self.weight_mode == 'mean':  \n                    weight  = weight+  
weight_t\n                else:\n                    weight = torch.cat((weight, weight_t), dim=1)   \n\n        if self.weight_mode == 'mean':\n        \tweight = weight/num_modal\n\n        elif self.weight_mode == 'max':\n        \tweight, _ = torch.max(weight, dim=1)\n        \tweight = weight.view(-1, 1)\n            \n        elif self.weight_mode == 'confid':\n            confidence = torch.cat((self.model_specific_conf[edge_index[0]], self.model_specific_conf[edge_index[1]]), dim=0)\n            weight = weight * confidence\n            weight, _ = torch.max(weight, dim=1)\n            weight = weight.view(-1, 1)\n            #print('weight is: ', weight)\n            \n\n        if self.pruning:\n            weight = torch.relu(weight)\n            \n\n\n        id_rep = self.id_gcn(edge_index, weight)\n        #print('id_rep is: ',id_rep)\n\n        if self.fusion_mode == 'concat':\n            representation = torch.cat((id_rep, content_rep), dim=1)\n            \n        elif self.fusion_mode  == 'id':\n            representation = id_rep\n        elif self.fusion_mode == 'mean':\n            representation = (id_rep+v_rep+a_rep+t_rep)/4\n\n        self.result = representation\n        #print('representation is: ',representation)\n        return representation\n\n    def calculate_loss(self, interaction):\n        batch_users = interaction[0]\n        pos_items = interaction[1] + self.n_users\n        neg_items = interaction[2] + self.n_users\n\n        user_tensor = batch_users.repeat_interleave(2)\n        stacked_items = torch.stack((pos_items, neg_items))\n        item_tensor = stacked_items.t().contiguous().view(-1)\n\n        out = self.forward()\n        user_score = out[user_tensor]\n        item_score = out[item_tensor]\n        score = torch.sum(user_score * item_score, dim=1).view(-1, 2)\n        loss = -torch.mean(torch.log(torch.sigmoid(torch.matmul(score, self.weight))))\n        reg_embedding_loss = 
(self.id_gcn.id_embedding[user_tensor]**2 + self.id_gcn.id_embedding[item_tensor]**2).mean()\n        if self.v_feat is not None:\n            reg_embedding_loss += (self.v_gcn.preference**2).mean()\n        reg_loss = self.reg_weight * reg_embedding_loss\n        reg_content_loss = torch.zeros(1).cuda() \n        if self.v_feat is not None:\n            reg_content_loss = reg_content_loss + (self.v_gcn.preference[user_tensor]**2).mean()\n        #if self.a_feat is not None:\n            #reg_content_loss = reg_content_loss + (self.a_gcn.preference[user_tensor]**2).mean()\n        if self.t_feat is not None:            \n            reg_content_loss = reg_content_loss + (self.t_gcn.preference[user_tensor]**2).mean()\n\n        reg_confid_loss = (self.model_specific_conf**2).mean()\n        \n        reg_loss = reg_embedding_loss + reg_content_loss\n\n        reg_loss = self.reg_weight * reg_loss\n        #print('loss',loss + reg_loss)\n\n        return loss + reg_loss\n        \n    def full_sort_predict(self, interaction):\n        user_tensor = self.result[:self.n_users]\n        item_tensor = self.result[self.n_users:]\n\n        temp_user_tensor = user_tensor[interaction[0], :]\n        score_matrix = torch.matmul(temp_user_tensor, item_tensor.t())\n        return score_matrix\n\n\n"
  },
  {
    "path": "src/models/itemknncbf.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\nr\"\"\"\nItemKNNCBF\n################################################\nReference:\n    https://github.com/CRIPAC-DIG/LATTICE\n    Are We Really Making Much Progress? A Worrying Analysis of Recent Neural Recommendation Approaches, ACM RecSys'19\n\"\"\"\n\n\nimport os\nimport random\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom common.abstract_recommender import GeneralRecommender\nfrom common.loss import BPRLoss, EmbLoss, L2Loss\nfrom utils.utils import build_sim, compute_normalized_laplacian, build_knn_neighbourhood\n\n\nclass ItemKNNCBF(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(ItemKNNCBF, self).__init__(config, dataset)\n\n        self.knn_k = config['knn_k']\n        self.shrink = config['shrink']\n\n        # load dataset info\n        interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32)\n        values = interaction_matrix.data\n        indices = np.vstack((interaction_matrix.row, interaction_matrix.col))\n\n        i = torch.LongTensor(indices)\n        v = torch.FloatTensor(values)\n        shape = interaction_matrix.shape\n\n        r_matrix = torch.sparse.FloatTensor(i, v, torch.Size(shape)).to(self.device)\n\n        if self.v_feat is not None and self.t_feat is not None:\n            item_fea = torch.cat((self.v_feat, self.t_feat), -1)\n        elif self.v_feat is not None:\n            item_fea = self.v_feat\n        else:\n            item_fea = self.t_feat\n\n        self.dummy_embeddings = nn.Parameter(torch.Tensor([0.5, 0.5]))\n\n        # build item-item sim matrix\n        item_sim = self.build_item_sim_matrix(item_fea)\n        self.scores_matrix = torch.mm(r_matrix, item_sim)\n\n    def build_item_sim_matrix(self, features):\n        i_norm = torch.norm(features, p=2, dim=-1, keepdim=True)\n        ij_norm = i_norm * i_norm.T + self.shrink\n        
ij = torch.mm(features, features.T)\n        sim = ij.div(ij_norm)\n\n        # top-k\n        knn_val, knn_ind = torch.topk(sim, self.knn_k, dim=-1)\n        weighted_adjacency_matrix = (torch.zeros_like(sim)).scatter_(-1, knn_ind, knn_val)\n        return weighted_adjacency_matrix\n\n    def build_item_sim_matrix_with_blocks(self, features, block_size=1000):\n        from tqdm import tqdm\n        \"\"\"\n        分块计算物品相似矩阵并显示进度条。\n\n        :param features: Tensor, 物品特征向量，形状为 (num_items, feature_dim)\n        :param block_size: int, 分块大小，默认 1000\n        :return: Tensor, 权重邻接矩阵\n        \"\"\"\n        num_items = features.size(0)\n        i_norm = torch.norm(features, p=2, dim=-1, keepdim=True)\n        shrink = self.shrink\n\n        # 初始化相似矩阵\n        weighted_adjacency_matrix = torch.zeros(num_items, num_items, device=features.device)\n\n        # 分块计算\n        for start_idx in tqdm(range(0, num_items, block_size), desc=\"Computing item similarities\"):\n            end_idx = min(start_idx + block_size, num_items)\n\n            # 当前分块\n            block_features = features[start_idx:end_idx]\n            block_norm = i_norm[start_idx:end_idx]\n\n            # 计算分块与所有物品的相似性\n            ij = torch.mm(block_features, features.T)\n            ij_norm = block_norm * i_norm.T + shrink\n            sim = ij.div(ij_norm)\n\n            # top-k\n            knn_val, knn_ind = torch.topk(sim, self.knn_k, dim=-1)\n            weighted_adjacency_matrix[start_idx:end_idx] = (torch.zeros_like(sim)\n                                                            .scatter_(-1, knn_ind, knn_val))\n\n        return weighted_adjacency_matrix\n\n    def calculate_loss(self, interaction):\n        tmp_v = torch.tensor(0.0)\n        return tmp_v\n\n    def full_sort_predict(self, interaction):\n        user = interaction[0]\n        scores = self.scores_matrix[user]\n\n        return scores\n\n"
  },
  {
    "path": "src/models/lattice.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\nr\"\"\"\nLATTICE\n################################################\nReference:\n    https://github.com/CRIPAC-DIG/LATTICE\n    ACM MM'2021: [Mining Latent Structures for Multimedia Recommendation] \n    https://arxiv.org/abs/2104.09036\n\"\"\"\n\n\nimport os\nimport random\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom common.abstract_recommender import GeneralRecommender\nfrom common.loss import BPRLoss, EmbLoss, L2Loss\nfrom utils.utils import build_sim, compute_normalized_laplacian, build_knn_neighbourhood\n\n\nclass LATTICE(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(LATTICE, self).__init__(config, dataset)\n\n        self.embedding_dim = config['embedding_size']\n        self.feat_embed_dim = config['feat_embed_dim']\n        self.weight_size = config['weight_size']\n        self.knn_k = config['knn_k']\n        self.lambda_coeff = config['lambda_coeff']\n        self.cf_model = config['cf_model']\n        self.n_layers = config['n_layers']\n        self.reg_weight = config['reg_weight']\n        self.build_item_graph = True\n\n        # load dataset info\n        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32)\n        self.norm_adj = self.get_adj_mat()\n        self.norm_adj = self.sparse_mx_to_torch_sparse_tensor(self.norm_adj).float().to(self.device)\n        self.item_adj = None\n\n        self.n_ui_layers = len(self.weight_size)\n        self.weight_size = [self.embedding_dim] + self.weight_size\n        self.user_embedding = nn.Embedding(self.n_users, self.embedding_dim)\n        self.item_id_embedding = nn.Embedding(self.n_items, self.embedding_dim)\n        nn.init.xavier_uniform_(self.user_embedding.weight)\n        nn.init.xavier_uniform_(self.item_id_embedding.weight)\n\n        if config['cf_model'] == 'ngcf':\n            self.GC_Linear_list 
= nn.ModuleList()\n            self.Bi_Linear_list = nn.ModuleList()\n            self.dropout_list = nn.ModuleList()\n            dropout_list = config['mess_dropout']\n            for i in range(self.n_ui_layers):\n                self.GC_Linear_list.append(nn.Linear(self.weight_size[i], self.weight_size[i + 1]))\n                self.Bi_Linear_list.append(nn.Linear(self.weight_size[i], self.weight_size[i + 1]))\n                self.dropout_list.append(nn.Dropout(dropout_list[i]))\n\n        dataset_path = os.path.abspath(config['data_path'] + config['dataset'])\n        image_adj_file = os.path.join(dataset_path, 'image_adj_{}.pt'.format(self.knn_k))\n        text_adj_file = os.path.join(dataset_path, 'text_adj_{}.pt'.format(self.knn_k))\n\n        if self.v_feat is not None:\n            self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=False)\n            if os.path.exists(image_adj_file):\n                image_adj = torch.load(image_adj_file)\n            else:\n                image_adj = build_sim(self.image_embedding.weight.detach())\n                image_adj = build_knn_neighbourhood(image_adj, topk=self.knn_k)\n                image_adj = compute_normalized_laplacian(image_adj)\n                torch.save(image_adj, image_adj_file)\n            self.image_original_adj = image_adj.cuda()\n\n        if self.t_feat is not None:\n            self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=False)\n            if os.path.exists(text_adj_file):\n                text_adj = torch.load(text_adj_file)\n            else:\n                text_adj = build_sim(self.text_embedding.weight.detach())\n                text_adj = build_knn_neighbourhood(text_adj, topk=self.knn_k)\n                text_adj = compute_normalized_laplacian(text_adj)\n                torch.save(text_adj, text_adj_file)\n            self.text_original_adj = text_adj.cuda()\n\n        if self.v_feat is not None:\n            self.image_trs = 
nn.Linear(self.v_feat.shape[1], self.feat_embed_dim)\n        if self.t_feat is not None:\n            self.text_trs = nn.Linear(self.t_feat.shape[1], self.feat_embed_dim)\n\n        self.modal_weight = nn.Parameter(torch.Tensor([0.5, 0.5]))\n        self.softmax = nn.Softmax(dim=0)\n\n    def pre_epoch_processing(self):\n        self.build_item_graph = True\n\n    def get_adj_mat(self):\n        adj_mat = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32)\n        adj_mat = adj_mat.tolil()\n        R = self.interaction_matrix.tolil()\n\n        adj_mat[:self.n_users, self.n_users:] = R\n        adj_mat[self.n_users:, :self.n_users] = R.T\n        adj_mat = adj_mat.todok()\n\n        def normalized_adj_single(adj):\n            rowsum = np.array(adj.sum(1))\n\n            d_inv = np.power(rowsum, -1).flatten()\n            d_inv[np.isinf(d_inv)] = 0.\n            d_mat_inv = sp.diags(d_inv)\n\n            norm_adj = d_mat_inv.dot(adj)\n            # norm_adj = adj.dot(d_mat_inv)\n            #print('generate single-normalized adjacency matrix.')\n            return norm_adj.tocoo()\n\n        norm_adj_mat = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0]))\n        return norm_adj_mat.tocsr()\n\n    def sparse_mx_to_torch_sparse_tensor(self, sparse_mx):\n        \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n        sparse_mx = sparse_mx.tocoo().astype(np.float32)\n        indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n        values = torch.from_numpy(sparse_mx.data)\n        shape = torch.Size(sparse_mx.shape)\n        return torch.sparse.FloatTensor(indices, values, shape)\n\n    def forward(self, adj, build_item_graph=False):\n        if self.v_feat is not None:\n            image_feats = self.image_trs(self.image_embedding.weight)\n        if self.t_feat is not None:\n            text_feats = self.text_trs(self.text_embedding.weight)\n        if 
build_item_graph:\n            weight = self.softmax(self.modal_weight)\n\n            if self.v_feat is not None:\n                self.image_adj = build_sim(image_feats)\n                self.image_adj = build_knn_neighbourhood(self.image_adj, topk=self.knn_k)\n                learned_adj = self.image_adj\n                original_adj = self.image_original_adj\n            if self.t_feat is not None:\n                self.text_adj = build_sim(text_feats)\n                self.text_adj = build_knn_neighbourhood(self.text_adj, topk=self.knn_k)\n                learned_adj = self.text_adj\n                original_adj = self.text_original_adj\n            if self.v_feat is not None and self.t_feat is not None:\n                learned_adj = weight[0] * self.image_adj + weight[1] * self.text_adj\n                original_adj = weight[0] * self.image_original_adj + weight[1] * self.text_original_adj\n\n            learned_adj = compute_normalized_laplacian(learned_adj)\n            if self.item_adj is not None:\n                del self.item_adj\n            self.item_adj = (1 - self.lambda_coeff) * learned_adj + self.lambda_coeff * original_adj\n        else:\n            self.item_adj = self.item_adj.detach()\n\n        h = self.item_id_embedding.weight\n        for i in range(self.n_layers):\n            h = torch.mm(self.item_adj, h)\n\n        if self.cf_model == 'ngcf':\n            ego_embeddings = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0)\n            all_embeddings = [ego_embeddings]\n            for i in range(self.n_ui_layers):\n                side_embeddings = torch.sparse.mm(adj, ego_embeddings)\n                sum_embeddings = F.leaky_relu(self.GC_Linear_list[i](side_embeddings))\n                bi_embeddings = torch.mul(ego_embeddings, side_embeddings)\n                bi_embeddings = F.leaky_relu(self.Bi_Linear_list[i](bi_embeddings))\n                ego_embeddings = sum_embeddings + bi_embeddings\n               
 ego_embeddings = self.dropout_list[i](ego_embeddings)\n\n                norm_embeddings = F.normalize(ego_embeddings, p=2, dim=1)\n                all_embeddings += [norm_embeddings]\n\n            all_embeddings = torch.stack(all_embeddings, dim=1)\n            all_embeddings = all_embeddings.mean(dim=1, keepdim=False)\n            u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0)\n            i_g_embeddings = i_g_embeddings + F.normalize(h, p=2, dim=1)\n            return u_g_embeddings, i_g_embeddings\n        elif self.cf_model == 'lightgcn':\n            ego_embeddings = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0)\n            all_embeddings = [ego_embeddings]\n            for i in range(self.n_ui_layers):\n                side_embeddings = torch.sparse.mm(adj, ego_embeddings)\n                ego_embeddings = side_embeddings\n                all_embeddings += [ego_embeddings]\n            all_embeddings = torch.stack(all_embeddings, dim=1)\n            all_embeddings = all_embeddings.mean(dim=1, keepdim=False)\n            u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0)\n            i_g_embeddings = i_g_embeddings + F.normalize(h, p=2, dim=1)\n            return u_g_embeddings, i_g_embeddings\n        elif self.cf_model == 'mf':\n            return self.user_embedding.weight, self.item_id_embedding.weight + F.normalize(h, p=2, dim=1)\n\n    def bpr_loss(self, users, pos_items, neg_items):\n        pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)\n        neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)\n\n        regularizer = 1./2*(users**2).sum() + 1./2*(pos_items**2).sum() + 1./2*(neg_items**2).sum()\n        regularizer = regularizer / self.batch_size\n\n        maxi = F.logsigmoid(pos_scores - neg_scores)\n        mf_loss = -torch.mean(maxi)\n\n        emb_loss = self.reg_weight * regularizer\n        
reg_loss = 0.0\n        return mf_loss, emb_loss, reg_loss\n\n    def calculate_loss(self, interaction):\n        users = interaction[0]\n        pos_items = interaction[1]\n        neg_items = interaction[2]\n\n        ua_embeddings, ia_embeddings = self.forward(self.norm_adj, build_item_graph=self.build_item_graph)\n        self.build_item_graph = False\n\n        u_g_embeddings = ua_embeddings[users]\n        pos_i_g_embeddings = ia_embeddings[pos_items]\n        neg_i_g_embeddings = ia_embeddings[neg_items]\n\n        batch_mf_loss, batch_emb_loss, batch_reg_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings,\n                                                                      neg_i_g_embeddings)\n        return batch_mf_loss + batch_emb_loss + batch_reg_loss\n\n    def full_sort_predict(self, interaction):\n        user = interaction[0]\n\n        restore_user_e, restore_item_e = self.forward(self.norm_adj, build_item_graph=True)\n        u_embeddings = restore_user_e[user]\n\n        # dot with all item embedding to accelerate\n        scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1))\n        return scores\n\n"
  },
  {
    "path": "src/models/layergcn.py",
    "content": "# -*- coding: utf-8 -*-\n\n\nimport numpy as np\nimport scipy.sparse as sp\nimport math\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom models.common.abstract_recommender import GeneralRecommender\nfrom models.common.loss import BPRLoss, EmbLoss, L2Loss\n\nclass LayerGCN(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(LayerGCN, self).__init__(config, dataset)\n\n        # load dataset info\n        self.interaction_matrix = dataset.inter_matrix(\n            form='coo').astype(np.float32)\n\n        # load parameters info\n        self.latent_dim = config['embedding_size']  # int type:the embedding size of lightGCN\n        self.n_layers = config['n_layers']  # int type:the layer num of lightGCN\n        self.reg_weight = config['reg_weight']  # float32 type: the weight decay for l2 normalizaton\n        self.dropout = config['dropout']\n\n        self.n_nodes = self.n_users + self.n_items\n\n        # define layers and loss\n        self.user_embeddings = nn.Parameter(nn.init.xavier_uniform_(torch.empty(self.n_users, self.latent_dim)))\n        self.item_embeddings = nn.Parameter(nn.init.xavier_uniform_(torch.empty(self.n_items, self.latent_dim)))\n\n        # normalized adj matrix\n        self.norm_adj_matrix = self.get_norm_adj_mat().to(self.device)\n        self.masked_adj = None\n        self.forward_adj = None\n        self.pruning_random = False\n\n        # edge prune\n        self.edge_indices, self.edge_values = self.get_edge_info()\n\n        self.mf_loss = BPRLoss()\n        self.reg_loss = L2Loss()\n\n    # def post_epoch_processing(self):\n    #     with torch.no_grad():\n    #         return '=== Layer weights: {}'.format(F.softmax(self.layer_weights.exp(), dim=0))\n\n    def pre_epoch_processing(self):\n        if self.dropout <= .0:\n            self.masked_adj = self.norm_adj_matrix\n            return\n        keep_len = int(self.edge_values.size(0) * (1. 
- self.dropout))\n        if self.pruning_random:\n            # pruning randomly\n            keep_idx = torch.tensor(random.sample(range(self.edge_values.size(0)), keep_len))\n        else:\n            # pruning edges by pro\n            keep_idx = torch.multinomial(self.edge_values, keep_len)         # prune high-degree nodes\n        self.pruning_random = True ^ self.pruning_random\n        keep_indices = self.edge_indices[:, keep_idx]\n        # norm values\n        keep_values = self._normalize_adj_m(keep_indices, torch.Size((self.n_users, self.n_items)))\n        all_values = torch.cat((keep_values, keep_values))\n        # update keep_indices to users/items+self.n_users\n        keep_indices[1] += self.n_users\n        all_indices = torch.cat((keep_indices, torch.flip(keep_indices, [0])), 1)\n        self.masked_adj = torch.sparse.FloatTensor(all_indices, all_values, self.norm_adj_matrix.shape).to(self.device)\n\n    def _normalize_adj_m(self, indices, adj_size):\n        adj = torch.sparse.FloatTensor(indices, torch.ones_like(indices[0]), adj_size)\n        row_sum = 1e-7 + torch.sparse.sum(adj, -1).to_dense()\n        col_sum = 1e-7 + torch.sparse.sum(adj.t(), -1).to_dense()\n        r_inv_sqrt = torch.pow(row_sum, -0.5)\n        rows_inv_sqrt = r_inv_sqrt[indices[0]]\n        c_inv_sqrt = torch.pow(col_sum, -0.5)\n        cols_inv_sqrt = c_inv_sqrt[indices[1]]\n        values = rows_inv_sqrt * cols_inv_sqrt\n        return values\n\n    def get_edge_info(self):\n        rows = torch.from_numpy(self.interaction_matrix.row)\n        cols = torch.from_numpy(self.interaction_matrix.col)\n        edges = torch.stack([rows, cols]).type(torch.LongTensor)\n        # edge normalized values\n        values = self._normalize_adj_m(edges, torch.Size((self.n_users, self.n_items)))\n        return edges, values\n\n    def get_norm_adj_mat(self):\n        A = sp.dok_matrix((self.n_users + self.n_items,\n                           self.n_users + self.n_items), 
dtype=np.float32)\n        inter_M = self.interaction_matrix\n        inter_M_t = self.interaction_matrix.transpose()\n        data_dict = dict(zip(zip(inter_M.row, inter_M.col + self.n_users),\n                             [1] * inter_M.nnz))\n        data_dict.update(dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col),\n                                  [1] * inter_M_t.nnz)))\n        A._update(data_dict)\n        # norm adj matrix\n        sumArr = (A > 0).sum(axis=1)\n        # add epsilon to avoid Devide by zero Warning\n        diag = np.array(sumArr.flatten())[0] + 1e-7\n        diag = np.power(diag, -0.5)\n        D = sp.diags(diag)\n        L = D * A * D\n        # covert norm_adj matrix to tensor\n        L = sp.coo_matrix(L)\n        row = L.row\n        col = L.col\n        i = torch.LongTensor([row, col])\n        data = torch.FloatTensor(L.data)\n\n        return torch.sparse.FloatTensor(i, data, torch.Size((self.n_nodes, self.n_nodes)))\n\n    def get_ego_embeddings(self):\n        r\"\"\"Get the embedding of users and items and combine to an embedding matrix.\n        Returns:\n            Tensor of the embedding matrix. 
Shape of [n_items+n_users, embedding_dim]\n        \"\"\"\n        ego_embeddings = torch.cat([self.user_embeddings, self.item_embeddings], 0)\n        return ego_embeddings\n\n    def forward(self):\n        ego_embeddings = self.get_ego_embeddings()\n        all_embeddings = ego_embeddings\n        embeddings_layers = []\n\n        for layer_idx in range(self.n_layers):\n            all_embeddings = torch.sparse.mm(self.forward_adj, all_embeddings)\n            _weights = F.cosine_similarity(all_embeddings, ego_embeddings, dim=-1)\n            all_embeddings = torch.einsum('a,ab->ab', _weights, all_embeddings)\n            embeddings_layers.append(all_embeddings)\n\n        ui_all_embeddings = torch.sum(torch.stack(embeddings_layers, dim=0), dim=0)\n        user_all_embeddings, item_all_embeddings = torch.split(ui_all_embeddings, [self.n_users, self.n_items])\n        return user_all_embeddings, item_all_embeddings\n\n    def bpr_loss(self, u_embeddings, i_embeddings, user, pos_item, neg_item):\n        u_embeddings = u_embeddings[user]\n        posi_embeddings = i_embeddings[pos_item]\n        negi_embeddings = i_embeddings[neg_item]\n\n        # calculate BPR Loss\n        pos_scores = torch.mul(u_embeddings, posi_embeddings).sum(dim=1)\n        neg_scores = torch.mul(u_embeddings, negi_embeddings).sum(dim=1)\n        m = torch.nn.LogSigmoid()\n        bpr_loss = torch.sum(-m(pos_scores - neg_scores))\n        #mf_loss = self.mf_loss(pos_scores, neg_scores)\n\n        return bpr_loss\n\n    def emb_loss(self, user, pos_item, neg_item):\n        # calculate BPR Loss\n        u_ego_embeddings = self.user_embeddings[user]\n        posi_ego_embeddings = self.item_embeddings[pos_item]\n        negi_ego_embeddings = self.item_embeddings[neg_item]\n\n        reg_loss = self.reg_loss(u_ego_embeddings, posi_ego_embeddings, negi_ego_embeddings)\n        return reg_loss\n\n    def calculate_loss(self, interaction):\n        user = interaction[0]\n        pos_item = 
interaction[1]\n        neg_item = interaction[2]\n\n        self.forward_adj = self.masked_adj\n        user_all_embeddings, item_all_embeddings = self.forward()\n\n        mf_loss = self.bpr_loss(user_all_embeddings, item_all_embeddings, user, pos_item, neg_item)\n        reg_loss = self.emb_loss(user, pos_item, neg_item)\n\n        loss = mf_loss + self.reg_weight * reg_loss\n        return loss\n\n    def full_sort_predict(self, interaction):\n        user = interaction[0]\n\n        self.forward_adj = self.norm_adj_matrix\n        restore_user_e, restore_item_e = self.forward()\n        u_embeddings = restore_user_e[user]\n\n        # dot with all item embedding to accelerate\n        scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1))\n        return scores\n\n\n"
  },
  {
    "path": "src/models/lgmrec.py",
    "content": "# coding: utf-8\n# @email: georgeguo.gzq.cn@gmail.com\nr\"\"\"\nLGMRec\n################################################\nReference:\n    https://github.com/georgeguo-cn/LGMRec\n    AAAI'2024: [LGMRec: Local and Global Graph Learning for Multimodal Recommendation]\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom common.abstract_recommender import GeneralRecommender\n\nclass LGMRec(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(LGMRec, self).__init__(config, dataset)\n\n        self.embedding_dim = config['embedding_size']\n        self.feat_embed_dim = config['feat_embed_dim']\n        self.cf_model = config['cf_model']\n        self.n_mm_layer = config['n_mm_layers']\n        self.n_ui_layers = config['n_ui_layers']\n        self.n_hyper_layer = config['n_hyper_layer']\n        self.hyper_num = config['hyper_num']\n        self.keep_rate = config['keep_rate']\n        self.alpha = config['alpha']\n        self.cl_weight = config['cl_weight']\n        self.reg_weight = config['reg_weight']\n        self.tau = 0.2\n\n        self.n_nodes = self.n_users + self.n_items\n\n        self.hgnnLayer = HGNNLayer(self.n_hyper_layer)\n\n        # load dataset info\n        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32)\n        self.adj = self.scipy_matrix_to_sparse_tenser(self.interaction_matrix, torch.Size((self.n_users, self.n_items)))\n        self.num_inters, self.norm_adj = self.get_norm_adj_mat()\n        self.num_inters = torch.FloatTensor(1.0 / (self.num_inters + 1e-7)).to(self.device)\n        \n        # init user and item ID embeddings\n        self.user_embedding = nn.Embedding(self.n_users, self.embedding_dim)\n        self.item_id_embedding = nn.Embedding(self.n_items, self.embedding_dim)\n        nn.init.xavier_uniform_(self.user_embedding.weight)\n        
nn.init.xavier_uniform_(self.item_id_embedding.weight)\n\n        self.drop = nn.Dropout(p=1-self.keep_rate)\n\n        # load item modal features and define hyperedges embeddings\n        if self.v_feat is not None:\n            self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=True)\n            self.item_image_trs = nn.Parameter(nn.init.xavier_uniform_(torch.zeros(self.v_feat.shape[1], self.feat_embed_dim)))\n            self.v_hyper = nn.Parameter(nn.init.xavier_uniform_(torch.zeros(self.v_feat.shape[1], self.hyper_num)))\n        if self.t_feat is not None:\n            self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=True)\n            self.item_text_trs = nn.Parameter(nn.init.xavier_uniform_(torch.zeros(self.t_feat.shape[1], self.feat_embed_dim)))\n            self.t_hyper = nn.Parameter(nn.init.xavier_uniform_(torch.zeros(self.t_feat.shape[1], self.hyper_num)))\n            \n    def scipy_matrix_to_sparse_tenser(self, matrix, shape):\n        row = matrix.row\n        col = matrix.col\n        i = torch.LongTensor(np.array([row, col]))\n        data = torch.FloatTensor(matrix.data)\n        return torch.sparse.FloatTensor(i, data, shape).to(self.device)\n    \n    def get_norm_adj_mat(self):\n        A = sp.dok_matrix((self.n_nodes, self.n_nodes), dtype=np.float32)\n        inter_M = self.interaction_matrix\n        inter_M_t = self.interaction_matrix.transpose()\n        data_dict = dict(zip(zip(inter_M.row, inter_M.col + self.n_users), [1] * inter_M.nnz))\n        data_dict.update(dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col), [1] * inter_M_t.nnz)))\n        A._update(data_dict)\n        # norm adj matrix\n        sumArr = (A > 0).sum(axis=1)\n        # add epsilon to avoid Devide by zero Warning\n        diag = np.array(sumArr.flatten())[0] + 1e-7\n        diag = np.power(diag, -0.5)\n        D = sp.diags(diag)\n        L = D * A * D\n        # covert norm_adj matrix to tensor\n        L = 
sp.coo_matrix(L)\n        return sumArr, self.scipy_matrix_to_sparse_tenser(L, torch.Size((self.n_nodes, self.n_nodes)))\n    \n    # collaborative graph embedding\n    def cge(self):\n        if self.cf_model == 'mf':\n            cge_embs = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0)\n        if self.cf_model == 'lightgcn':\n            ego_embeddings = torch.cat((self.user_embedding.weight, self.item_id_embedding.weight), dim=0)\n            cge_embs = [ego_embeddings]\n            for _ in range(self.n_ui_layers):\n                ego_embeddings = torch.sparse.mm(self.norm_adj, ego_embeddings)\n                cge_embs += [ego_embeddings]\n            cge_embs = torch.stack(cge_embs, dim=1)\n            cge_embs = cge_embs.mean(dim=1, keepdim=False)\n        return cge_embs\n    \n    # modality graph embedding\n    def mge(self, str='v'):\n        if str == 'v':\n            item_feats = torch.mm(self.image_embedding.weight, self.item_image_trs)\n        elif str == 't':\n            item_feats = torch.mm(self.text_embedding.weight, self.item_text_trs)\n        user_feats = torch.sparse.mm(self.adj, item_feats) * self.num_inters[:self.n_users]\n        # user_feats = self.user_embedding.weight\n        mge_feats = torch.concat([user_feats, item_feats], dim=0)\n        for _ in range(self.n_mm_layer):\n            mge_feats = torch.sparse.mm(self.norm_adj, mge_feats)\n        return mge_feats\n    \n    def forward(self):\n        # hyperedge dependencies constructing\n        if self.v_feat is not None:\n            iv_hyper = torch.mm(self.image_embedding.weight, self.v_hyper)\n            uv_hyper = torch.mm(self.adj, iv_hyper)\n            iv_hyper = F.gumbel_softmax(iv_hyper, self.tau, dim=1, hard=False)\n            uv_hyper = F.gumbel_softmax(uv_hyper, self.tau, dim=1, hard=False)\n        if self.t_feat is not None:\n            it_hyper = torch.mm(self.text_embedding.weight, self.t_hyper)\n            ut_hyper = 
torch.mm(self.adj, it_hyper)\n            it_hyper = F.gumbel_softmax(it_hyper, self.tau, dim=1, hard=False)\n            ut_hyper = F.gumbel_softmax(ut_hyper, self.tau, dim=1, hard=False)\n        \n        # CGE: collaborative graph embedding\n        cge_embs = self.cge()\n        \n        if self.v_feat is not None and self.t_feat is not None:\n            # MGE: modal graph embedding\n            v_feats = self.mge('v')\n            t_feats = self.mge('t')\n            # local embeddings = collaborative-related embedding + modality-related embedding\n            mge_embs = F.normalize(v_feats) + F.normalize(t_feats)\n            lge_embs = cge_embs + mge_embs\n            # GHE: global hypergraph embedding\n            uv_hyper_embs, iv_hyper_embs = self.hgnnLayer(self.drop(iv_hyper), self.drop(uv_hyper), cge_embs[self.n_users:])\n            ut_hyper_embs, it_hyper_embs = self.hgnnLayer(self.drop(it_hyper), self.drop(ut_hyper), cge_embs[self.n_users:])\n            av_hyper_embs = torch.concat([uv_hyper_embs, iv_hyper_embs], dim=0)\n            at_hyper_embs = torch.concat([ut_hyper_embs, it_hyper_embs], dim=0)\n            ghe_embs = av_hyper_embs + at_hyper_embs\n            # local embeddings + alpha * global embeddings\n            all_embs = lge_embs + self.alpha * F.normalize(ghe_embs)\n        else:\n            all_embs = cge_embs\n\n        u_embs, i_embs = torch.split(all_embs, [self.n_users, self.n_items], dim=0)\n\n        return u_embs, i_embs, [uv_hyper_embs, iv_hyper_embs, ut_hyper_embs, it_hyper_embs]\n        \n    def bpr_loss(self, users, pos_items, neg_items):\n        pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)\n        neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)\n        bpr_loss = -torch.mean(F.logsigmoid(pos_scores - neg_scores))\n        return bpr_loss\n    \n    def ssl_triple_loss(self, emb1, emb2, all_emb):\n        norm_emb1 = F.normalize(emb1)\n        norm_emb2 = F.normalize(emb2)\n        
norm_all_emb = F.normalize(all_emb)\n        pos_score = torch.exp(torch.mul(norm_emb1, norm_emb2).sum(dim=1) / self.tau)\n        ttl_score = torch.exp(torch.matmul(norm_emb1, norm_all_emb.T) / self.tau).sum(dim=1)\n        ssl_loss = -torch.log(pos_score / ttl_score).sum()\n        return ssl_loss\n    \n    def reg_loss(self, *embs):\n        reg_loss = 0\n        for emb in embs:\n            reg_loss += torch.norm(emb, p=2)\n        reg_loss /= embs[-1].shape[0]\n        return reg_loss\n\n    def calculate_loss(self, interaction):\n        ua_embeddings, ia_embeddings, hyper_embeddings = self.forward()\n\n        users = interaction[0]\n        pos_items = interaction[1]\n        neg_items = interaction[2]\n        u_g_embeddings = ua_embeddings[users]\n        pos_i_g_embeddings = ia_embeddings[pos_items]\n        neg_i_g_embeddings = ia_embeddings[neg_items]\n\n        batch_bpr_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings)\n\n        [uv_embs, iv_embs, ut_embs, it_embs] = hyper_embeddings\n        batch_hcl_loss = self.ssl_triple_loss(uv_embs[users], ut_embs[users], ut_embs) + self.ssl_triple_loss(iv_embs[pos_items], it_embs[pos_items], it_embs)\n        \n        batch_reg_loss = self.reg_loss(u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings)\n\n        loss = batch_bpr_loss + self.cl_weight * batch_hcl_loss + self.reg_weight * batch_reg_loss\n\n        return loss\n\n    def full_sort_predict(self, interaction):\n        user = interaction[0]\n        user_embs, item_embs, _ = self.forward()\n        scores = torch.matmul(user_embs[user], item_embs.T)\n        return scores\n\nclass HGNNLayer(nn.Module):\n    def __init__(self, n_hyper_layer):\n        super(HGNNLayer, self).__init__()\n\n        self.h_layer = n_hyper_layer\n    \n    def forward(self, i_hyper, u_hyper, embeds):\n        i_ret = embeds\n        for _ in range(self.h_layer):\n            lat = torch.mm(i_hyper.T, i_ret)\n            i_ret = 
torch.mm(i_hyper, lat)\n            u_ret = torch.mm(u_hyper, lat)\n        return u_ret, i_ret\n"
  },
  {
    "path": "src/models/lightgcn.py",
    "content": "# -*- coding: utf-8 -*-\nr\"\"\"\nLightGCN\n################################################\n\nReference:\n    Xiangnan He et al. \"LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation.\" in SIGIR 2020.\n\nReference code:\n    https://github.com/kuandeng/LightGCN\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\n\nfrom common.abstract_recommender import GeneralRecommender\nfrom common.loss import BPRLoss, EmbLoss\nfrom common.init import xavier_uniform_initialization\n\n\nclass LightGCN(GeneralRecommender):\n    r\"\"\"LightGCN is a GCN-based recommender model.\n\n    LightGCN includes only the most essential component in GCN — neighborhood aggregation — for\n    collaborative filtering. Specifically, LightGCN learns user and item embeddings by linearly\n    propagating them on the user-item interaction graph, and uses the weighted sum of the embeddings\n    learned at all layers as the final embedding.\n\n    We implement the model following the original author with a pairwise training mode.\n    \"\"\"\n    def __init__(self, config, dataset):\n        super(LightGCN, self).__init__(config, dataset)\n\n        # load dataset info\n        self.interaction_matrix = dataset.inter_matrix(\n            form='coo').astype(np.float32)\n\n        # load parameters info\n        self.latent_dim = config['embedding_size']  # int type:the embedding size of lightGCN\n        self.n_layers = config['n_layers']  # int type:the layer num of lightGCN\n        self.reg_weight = config['reg_weight']  # float32 type: the weight decay for l2 normalizaton\n\n        self.mf_loss = BPRLoss()\n        self.reg_loss = EmbLoss()\n\n        self.embedding_dict = self._init_model()\n\n        # generate intermediate data\n        self.norm_adj_matrix = self.get_norm_adj_mat().to(self.device)\n\n        # parameters initialization\n        #self.apply(xavier_uniform_initialization)\n\n    def 
_init_model(self):\n        initializer = nn.init.xavier_uniform_\n        embedding_dict = nn.ParameterDict({\n            'user_emb': nn.Parameter(initializer(torch.empty(self.n_users, self.latent_dim))),\n            'item_emb': nn.Parameter(initializer(torch.empty(self.n_items, self.latent_dim)))\n        })\n\n        return embedding_dict\n\n    def get_norm_adj_mat(self):\n        r\"\"\"Get the normalized interaction matrix of users and items.\n\n        Construct the square matrix from the training data and normalize it\n        using the laplace matrix.\n\n        .. math::\n            A_{hat} = D^{-0.5} \\times A \\times D^{-0.5}\n\n        Returns:\n            Sparse tensor of the normalized interaction matrix.\n        \"\"\"\n        # build adj matrix\n        A = sp.dok_matrix((self.n_users + self.n_items,\n                           self.n_users + self.n_items), dtype=np.float32)\n        inter_M = self.interaction_matrix\n        inter_M_t = self.interaction_matrix.transpose()\n        data_dict = dict(zip(zip(inter_M.row, inter_M.col+self.n_users),\n                             [1]*inter_M.nnz))\n        data_dict.update(dict(zip(zip(inter_M_t.row+self.n_users, inter_M_t.col),\n                                  [1]*inter_M_t.nnz)))\n        A._update(data_dict)\n        # norm adj matrix\n        sumArr = (A > 0).sum(axis=1)\n        # add epsilon to avoid Devide by zero Warning\n        diag = np.array(sumArr.flatten())[0] + 1e-7\n        diag = np.power(diag, -0.5)\n        D = sp.diags(diag)\n        L = D * A * D\n        # covert norm_adj matrix to tensor\n        L = sp.coo_matrix(L)\n        row = L.row\n        col = L.col\n        i = torch.LongTensor([row, col])\n        data = torch.FloatTensor(L.data)\n        SparseL = torch.sparse.FloatTensor(i, data, torch.Size(L.shape))\n        return SparseL\n\n    def get_ego_embeddings(self):\n        r\"\"\"Get the embedding of users and items and combine to an embedding matrix.\n\n        
Returns:\n            Tensor of the embedding matrix. Shape of [n_items+n_users, embedding_dim]\n        \"\"\"\n        # user_embeddings = self.user_embedding.weight\n        # item_embeddings = self.item_embedding.weight\n        # ego_embeddings = torch.cat([user_embeddings, item_embeddings], dim=0)\n        ego_embeddings = torch.cat([self.embedding_dict['user_emb'], self.embedding_dict['item_emb']], 0)\n        return ego_embeddings\n\n    def forward(self):\n        all_embeddings = self.get_ego_embeddings()\n        embeddings_list = [all_embeddings]\n\n        for layer_idx in range(self.n_layers):\n            all_embeddings = torch.sparse.mm(self.norm_adj_matrix, all_embeddings)\n            embeddings_list.append(all_embeddings)\n        lightgcn_all_embeddings = torch.stack(embeddings_list, dim=1)\n        lightgcn_all_embeddings = torch.mean(lightgcn_all_embeddings, dim=1)\n\n        user_all_embeddings = lightgcn_all_embeddings[:self.n_users, :]\n        item_all_embeddings = lightgcn_all_embeddings[self.n_users:, :]\n\n        return user_all_embeddings, item_all_embeddings\n\n    def calculate_loss(self, interaction):\n        user = interaction[0]\n        pos_item = interaction[1]\n        neg_item = interaction[2]\n\n        user_all_embeddings, item_all_embeddings = self.forward()\n\n        u_embeddings = user_all_embeddings[user, :]\n        posi_embeddings = item_all_embeddings[pos_item, :]\n        negi_embeddings = item_all_embeddings[neg_item, :]\n\n        # calculate BPR Loss\n        pos_scores = torch.mul(u_embeddings, posi_embeddings).sum(dim=1)\n        neg_scores = torch.mul(u_embeddings, negi_embeddings).sum(dim=1)\n        mf_loss = self.mf_loss(pos_scores, neg_scores)\n\n        # calculate BPR Loss\n        u_ego_embeddings = self.embedding_dict['user_emb'][user, :]\n        posi_ego_embeddings = self.embedding_dict['item_emb'][pos_item, :]\n        negi_ego_embeddings = self.embedding_dict['item_emb'][neg_item, :]\n\n        
reg_loss = self.reg_loss(u_ego_embeddings, posi_ego_embeddings, negi_ego_embeddings)\n        loss = mf_loss + self.reg_weight * reg_loss\n\n        return loss\n\n    def full_sort_predict(self, interaction):\n        user = interaction[0]\n        restore_user_e, restore_item_e = self.forward()\n        u_embeddings = restore_user_e[user, :]\n\n        # dot with all item embedding to accelerate\n        scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1))\n\n        return scores\n"
  },
  {
    "path": "src/models/mgcn.py",
    "content": "# coding: utf-8\n# @email: y463213402@gmail.com\nr\"\"\"\nMGCN\n################################################\nReference:\n    https://github.com/demonph10/MGCN\n    ACM MM'2023: [Multi-View Graph Convolutional Network for Multimedia Recommendation]\n\"\"\"\n\nimport os\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom common.abstract_recommender import GeneralRecommender\nfrom utils.utils import build_sim, compute_normalized_laplacian, build_knn_neighbourhood, build_knn_normalized_graph\n\n\nclass MGCN(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(MGCN, self).__init__(config, dataset)\n        self.sparse = True\n        self.cl_loss = config['cl_loss']\n        self.n_ui_layers = config['n_ui_layers']\n        self.embedding_dim = config['embedding_size']\n        self.knn_k = config['knn_k']\n        self.n_layers = config['n_layers']\n        self.reg_weight = config['reg_weight']\n\n        # load dataset info\n        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32)\n\n        self.user_embedding = nn.Embedding(self.n_users, self.embedding_dim)\n        self.item_id_embedding = nn.Embedding(self.n_items, self.embedding_dim)\n        nn.init.xavier_uniform_(self.user_embedding.weight)\n        nn.init.xavier_uniform_(self.item_id_embedding.weight)\n\n        dataset_path = os.path.abspath(config['data_path'] + config['dataset'])\n        image_adj_file = os.path.join(dataset_path, 'image_adj_{}_{}.pt'.format(self.knn_k, self.sparse))\n        text_adj_file = os.path.join(dataset_path, 'text_adj_{}_{}.pt'.format(self.knn_k, self.sparse))\n\n        self.norm_adj = self.get_adj_mat()\n        self.R = self.sparse_mx_to_torch_sparse_tensor(self.R).float().to(self.device)\n        self.norm_adj = self.sparse_mx_to_torch_sparse_tensor(self.norm_adj).float().to(self.device)\n\n\n        if self.v_feat is not 
None:\n            self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=False)\n            if os.path.exists(image_adj_file):\n                image_adj = torch.load(image_adj_file)\n            else:\n                image_adj = build_sim(self.image_embedding.weight.detach())\n                image_adj = build_knn_normalized_graph(image_adj, topk=self.knn_k, is_sparse=self.sparse,\n                                                       norm_type='sym')\n                torch.save(image_adj, image_adj_file)\n            self.image_original_adj = image_adj.cuda()\n\n        if self.t_feat is not None:\n            self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=False)\n            if os.path.exists(text_adj_file):\n                text_adj = torch.load(text_adj_file)\n            else:\n                text_adj = build_sim(self.text_embedding.weight.detach())\n                text_adj = build_knn_normalized_graph(text_adj, topk=self.knn_k, is_sparse=self.sparse, norm_type='sym')\n                torch.save(text_adj, text_adj_file)\n            self.text_original_adj = text_adj.cuda()\n\n        if self.v_feat is not None:\n            self.image_trs = nn.Linear(self.v_feat.shape[1], self.embedding_dim)\n        if self.t_feat is not None:\n            self.text_trs = nn.Linear(self.t_feat.shape[1], self.embedding_dim)\n\n        self.softmax = nn.Softmax(dim=-1)\n\n        self.query_common = nn.Sequential(\n            nn.Linear(self.embedding_dim, self.embedding_dim),\n            nn.Tanh(),\n            nn.Linear(self.embedding_dim, 1, bias=False)\n        )\n\n        self.gate_v = nn.Sequential(\n            nn.Linear(self.embedding_dim, self.embedding_dim),\n            nn.Sigmoid()\n        )\n\n        self.gate_t = nn.Sequential(\n            nn.Linear(self.embedding_dim, self.embedding_dim),\n            nn.Sigmoid()\n        )\n\n        self.gate_image_prefer = nn.Sequential(\n            
nn.Linear(self.embedding_dim, self.embedding_dim),\n            nn.Sigmoid()\n        )\n\n        self.gate_text_prefer = nn.Sequential(\n            nn.Linear(self.embedding_dim, self.embedding_dim),\n            nn.Sigmoid()\n        )\n\n        self.tau = 0.5\n\n    def pre_epoch_processing(self):\n        pass\n\n    def get_adj_mat(self):\n        adj_mat = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32)\n        adj_mat = adj_mat.tolil()\n        R = self.interaction_matrix.tolil()\n\n        adj_mat[:self.n_users, self.n_users:] = R\n        adj_mat[self.n_users:, :self.n_users] = R.T\n        adj_mat = adj_mat.todok()\n\n        def normalized_adj_single(adj):\n            rowsum = np.array(adj.sum(1))\n\n            d_inv = np.power(rowsum, -0.5).flatten()\n            d_inv[np.isinf(d_inv)] = 0.\n            d_mat_inv = sp.diags(d_inv)\n\n            norm_adj = d_mat_inv.dot(adj_mat)\n            norm_adj = norm_adj.dot(d_mat_inv)\n            # norm_adj = adj.dot(d_mat_inv)\n            # print('generate single-normalized adjacency matrix.')\n            return norm_adj.tocoo()\n\n        # norm_adj_mat = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0]))\n        norm_adj_mat = normalized_adj_single(adj_mat)\n        norm_adj_mat = norm_adj_mat.tolil()\n        self.R = norm_adj_mat[:self.n_users, self.n_users:]\n        # norm_adj_mat = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0]))\n        return norm_adj_mat.tocsr()\n\n    def sparse_mx_to_torch_sparse_tensor(self, sparse_mx):\n        \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n        sparse_mx = sparse_mx.tocoo().astype(np.float32)\n        indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n        values = torch.from_numpy(sparse_mx.data)\n        shape = torch.Size(sparse_mx.shape)\n        return torch.sparse.FloatTensor(indices, values, shape)\n\n    def forward(self, 
adj, train=False):\n        if self.v_feat is not None:\n            image_feats = self.image_trs(self.image_embedding.weight)\n        if self.t_feat is not None:\n            text_feats = self.text_trs(self.text_embedding.weight)\n\n        # Behavior-Guided Purifier\n        image_item_embeds = torch.multiply(self.item_id_embedding.weight, self.gate_v(image_feats))\n        text_item_embeds = torch.multiply(self.item_id_embedding.weight, self.gate_t(text_feats))\n\n        # User-Item View\n        item_embeds = self.item_id_embedding.weight\n        user_embeds = self.user_embedding.weight\n        ego_embeddings = torch.cat([user_embeds, item_embeds], dim=0)\n        all_embeddings = [ego_embeddings]\n        for i in range(self.n_ui_layers):\n            side_embeddings = torch.sparse.mm(adj, ego_embeddings)\n            ego_embeddings = side_embeddings\n            all_embeddings += [ego_embeddings]\n        all_embeddings = torch.stack(all_embeddings, dim=1)\n        all_embeddings = all_embeddings.mean(dim=1, keepdim=False)\n        content_embeds = all_embeddings\n\n        # Item-Item View\n        if self.sparse:\n            for i in range(self.n_layers):\n                image_item_embeds = torch.sparse.mm(self.image_original_adj, image_item_embeds)\n        else:\n            for i in range(self.n_layers):\n                image_item_embeds = torch.mm(self.image_original_adj, image_item_embeds)\n        image_user_embeds = torch.sparse.mm(self.R, image_item_embeds)\n        image_embeds = torch.cat([image_user_embeds, image_item_embeds], dim=0)\n        if self.sparse:\n            for i in range(self.n_layers):\n                text_item_embeds = torch.sparse.mm(self.text_original_adj, text_item_embeds)\n        else:\n            for i in range(self.n_layers):\n                text_item_embeds = torch.mm(self.text_original_adj, text_item_embeds)\n        text_user_embeds = torch.sparse.mm(self.R, text_item_embeds)\n        text_embeds = 
torch.cat([text_user_embeds, text_item_embeds], dim=0)\n\n        # Behavior-Aware Fuser\n        att_common = torch.cat([self.query_common(image_embeds), self.query_common(text_embeds)], dim=-1)\n        weight_common = self.softmax(att_common)\n        common_embeds = weight_common[:, 0].unsqueeze(dim=1) * image_embeds + weight_common[:, 1].unsqueeze(\n            dim=1) * text_embeds\n        sep_image_embeds = image_embeds - common_embeds\n        sep_text_embeds = text_embeds - common_embeds\n\n        image_prefer = self.gate_image_prefer(content_embeds)\n        text_prefer = self.gate_text_prefer(content_embeds)\n        sep_image_embeds = torch.multiply(image_prefer, sep_image_embeds)\n        sep_text_embeds = torch.multiply(text_prefer, sep_text_embeds)\n        side_embeds = (sep_image_embeds + sep_text_embeds + common_embeds) / 3\n\n        all_embeds = content_embeds + side_embeds\n\n        all_embeddings_users, all_embeddings_items = torch.split(all_embeds, [self.n_users, self.n_items], dim=0)\n\n        if train:\n            return all_embeddings_users, all_embeddings_items, side_embeds, content_embeds\n\n        return all_embeddings_users, all_embeddings_items\n\n    def bpr_loss(self, users, pos_items, neg_items):\n        pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)\n        neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)\n\n        regularizer = 1. / 2 * (users ** 2).sum() + 1. / 2 * (pos_items ** 2).sum() + 1. 
/ 2 * (neg_items ** 2).sum()\n        regularizer = regularizer / self.batch_size\n\n        maxi = F.logsigmoid(pos_scores - neg_scores)\n        mf_loss = -torch.mean(maxi)\n\n        emb_loss = self.reg_weight * regularizer\n        reg_loss = 0.0\n        return mf_loss, emb_loss, reg_loss\n\n    def InfoNCE(self, view1, view2, temperature):\n        view1, view2 = F.normalize(view1, dim=1), F.normalize(view2, dim=1)\n        pos_score = (view1 * view2).sum(dim=-1)\n        pos_score = torch.exp(pos_score / temperature)\n        ttl_score = torch.matmul(view1, view2.transpose(0, 1))\n        ttl_score = torch.exp(ttl_score / temperature).sum(dim=1)\n        cl_loss = -torch.log(pos_score / ttl_score)\n        return torch.mean(cl_loss)\n\n    def calculate_loss(self, interaction):\n        users = interaction[0]\n        pos_items = interaction[1]\n        neg_items = interaction[2]\n\n        ua_embeddings, ia_embeddings, side_embeds, content_embeds = self.forward(\n            self.norm_adj, train=True)\n\n        u_g_embeddings = ua_embeddings[users]\n        pos_i_g_embeddings = ia_embeddings[pos_items]\n        neg_i_g_embeddings = ia_embeddings[neg_items]\n\n        batch_mf_loss, batch_emb_loss, batch_reg_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings,\n                                                                      neg_i_g_embeddings)\n\n        side_embeds_users, side_embeds_items = torch.split(side_embeds, [self.n_users, self.n_items], dim=0)\n        content_embeds_user, content_embeds_items = torch.split(content_embeds, [self.n_users, self.n_items], dim=0)\n        cl_loss = self.InfoNCE(side_embeds_items[pos_items], content_embeds_items[pos_items], 0.2) + self.InfoNCE(\n            side_embeds_users[users], content_embeds_user[users], 0.2)\n\n        return batch_mf_loss + batch_emb_loss + batch_reg_loss + self.cl_loss * cl_loss\n\n    def full_sort_predict(self, interaction):\n        user = interaction[0]\n\n        restore_user_e, 
restore_item_e = self.forward(self.norm_adj)\n        u_embeddings = restore_user_e[user]\n\n        # dot with all item embedding to accelerate\n        scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1))\n        return scores"
  },
  {
    "path": "src/models/mmgcn.py",
    "content": "# coding: utf-8\n\"\"\"\nMMGCN: Multi-modal Graph Convolution Network for Personalized Recommendation of Micro-video. \nIn ACM MM`19,\n\"\"\"\n\nimport os\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.utils import remove_self_loops, add_self_loops, degree\nimport torch_geometric\n\nfrom common.abstract_recommender import GeneralRecommender\nfrom common.loss import BPRLoss, EmbLoss\nfrom common.init import xavier_uniform_initialization\n\n\nclass MMGCN(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(MMGCN, self).__init__(config, dataset)\n        self.num_user = self.n_users\n        self.num_item = self.n_items\n        num_user = self.n_users\n        num_item = self.n_items\n        dim_x = config['embedding_size']\n        num_layer = config['n_layers']\n        batch_size = config['train_batch_size']         # not used\n        self.aggr_mode = 'mean'\n        self.concate = 'False'\n        has_id = True\n        self.weight = torch.tensor([[1.0], [-1.0]]).to(self.device)\n        self.reg_weight = config['reg_weight']\n\n        # packing interaction in training into edge_index\n        train_interactions = dataset.inter_matrix(form='coo').astype(np.float32)\n        edge_index = torch.tensor(self.pack_edge_index(train_interactions), dtype=torch.long)\n        self.edge_index = edge_index.t().contiguous().to(self.device)\n        self.edge_index = torch.cat((self.edge_index, self.edge_index[[1, 0]]), dim=1)\n        self.num_modal = 0\n\n        if self.v_feat is not None:\n            self.v_gcn = GCN(self.edge_index, batch_size, num_user, num_item, self.v_feat.size(1), dim_x, self.aggr_mode,\n                             self.concate, num_layer=num_layer, has_id=has_id, dim_latent=256, device=self.device)\n            self.num_modal += 1\n\n        if self.t_feat is not 
None:\n            self.t_gcn = GCN(self.edge_index, batch_size, num_user, num_item, self.t_feat.size(1), dim_x,\n                             self.aggr_mode, self.concate, num_layer=num_layer, has_id=has_id, device=self.device)\n            self.num_modal += 1\n\n        self.id_embedding = nn.init.xavier_normal_(torch.rand((num_user+num_item, dim_x), requires_grad=True)).to(self.device)\n        self.result = nn.init.xavier_normal_(torch.rand((num_user + num_item, dim_x))).to(self.device)\n\n    def pack_edge_index(self, inter_mat):\n        rows = inter_mat.row\n        cols = inter_mat.col + self.n_users\n        # ndarray([598918, 2]) for ml-imdb\n        return np.column_stack((rows, cols))\n\n    def forward(self):\n        representation = None\n        if self.v_feat is not None:\n            representation = self.v_gcn(self.v_feat, self.id_embedding)\n        if self.t_feat is not None:\n            if representation is None:\n                representation = self.t_gcn(self.t_feat, self.id_embedding)\n            else:\n                representation += self.t_gcn(self.t_feat, self.id_embedding)\n\n        representation /= self.num_modal\n\n        self.result = representation\n        return representation\n\n    def calculate_loss(self, interaction):\n        batch_users = interaction[0]\n        pos_items = interaction[1] + self.n_users\n        neg_items = interaction[2] + self.n_users\n\n        user_tensor = batch_users.repeat_interleave(2)\n        stacked_items = torch.stack((pos_items, neg_items))\n        item_tensor = stacked_items.t().contiguous().view(-1)\n\n        out = self.forward()\n        user_score = out[user_tensor]\n        item_score = out[item_tensor]\n        score = torch.sum(user_score * item_score, dim=1).view(-1, 2)\n        loss = -torch.mean(torch.log(torch.sigmoid(torch.matmul(score, self.weight))))\n        reg_embedding_loss = (self.id_embedding[user_tensor]**2 + self.id_embedding[item_tensor]**2).mean()\n        if 
self.v_feat is not None:\n            reg_embedding_loss += (self.v_gcn.preference**2).mean()\n        reg_loss = self.reg_weight * reg_embedding_loss\n        return loss + reg_loss\n\n    def full_sort_predict(self, interaction):\n        user_tensor = self.result[:self.n_users]\n        item_tensor = self.result[self.n_users:]\n\n        temp_user_tensor = user_tensor[interaction[0], :]\n        score_matrix = torch.matmul(temp_user_tensor, item_tensor.t())\n        return score_matrix\n\n\nclass GCN(torch.nn.Module):\n    def __init__(self, edge_index, batch_size, num_user, num_item, dim_feat, dim_id, aggr_mode, concate, num_layer,\n                 has_id, dim_latent=None, device='cpu'):\n        super(GCN, self).__init__()\n        self.batch_size = batch_size\n        self.num_user = num_user\n        self.num_item = num_item\n        self.dim_id = dim_id\n        self.dim_feat = dim_feat\n        self.dim_latent = dim_latent\n        self.edge_index = edge_index\n        self.aggr_mode = aggr_mode\n        self.concate = concate\n        self.num_layer = num_layer\n        self.has_id = has_id\n        self.device = device\n\n        if self.dim_latent:\n            self.preference = nn.init.xavier_normal_(torch.rand((num_user, self.dim_latent), requires_grad=True)).to(self.device)\n            #self.preference = nn.Parameter(nn.init.xavier_normal_(torch.rand((num_user, self.dim_latent))))\n\n            self.MLP = nn.Linear(self.dim_feat, self.dim_latent)\n            self.conv_embed_1 = BaseModel(self.dim_latent, self.dim_latent, aggr=self.aggr_mode)\n            nn.init.xavier_normal_(self.conv_embed_1.weight)\n            self.linear_layer1 = nn.Linear(self.dim_latent, self.dim_id)\n            nn.init.xavier_normal_(self.linear_layer1.weight)\n            self.g_layer1 = nn.Linear(self.dim_latent + self.dim_id, self.dim_id) if self.concate else nn.Linear(\n                self.dim_latent, self.dim_id)\n            
nn.init.xavier_normal_(self.g_layer1.weight)\n\n        else:\n            self.preference = nn.init.xavier_normal_(torch.rand((num_user, self.dim_feat), requires_grad=True)).to(self.device)\n            #self.preference = nn.Parameter(nn.init.xavier_normal_(torch.rand((num_user, self.dim_feat))))\n\n            self.conv_embed_1 = BaseModel(self.dim_feat, self.dim_feat, aggr=self.aggr_mode)\n            nn.init.xavier_normal_(self.conv_embed_1.weight)\n            self.linear_layer1 = nn.Linear(self.dim_feat, self.dim_id)\n            nn.init.xavier_normal_(self.linear_layer1.weight)\n            self.g_layer1 = nn.Linear(self.dim_feat + self.dim_id, self.dim_id) if self.concate else nn.Linear(\n                self.dim_feat, self.dim_id)\n            nn.init.xavier_normal_(self.g_layer1.weight)\n\n        self.conv_embed_2 = BaseModel(self.dim_id, self.dim_id, aggr=self.aggr_mode)\n        nn.init.xavier_normal_(self.conv_embed_2.weight)\n        self.linear_layer2 = nn.Linear(self.dim_id, self.dim_id)\n        nn.init.xavier_normal_(self.linear_layer2.weight)\n        self.g_layer2 = nn.Linear(self.dim_id + self.dim_id, self.dim_id) if self.concate else nn.Linear(self.dim_id,\n                                                                                                         self.dim_id)\n\n        self.conv_embed_3 = BaseModel(self.dim_id, self.dim_id, aggr=self.aggr_mode)\n        nn.init.xavier_normal_(self.conv_embed_3.weight)\n        self.linear_layer3 = nn.Linear(self.dim_id, self.dim_id)\n        nn.init.xavier_normal_(self.linear_layer3.weight)\n        self.g_layer3 = nn.Linear(self.dim_id + self.dim_id, self.dim_id) if self.concate else nn.Linear(self.dim_id,\n                                                                                                         self.dim_id)\n\n    def forward(self, features, id_embedding):\n        temp_features = self.MLP(features) if self.dim_latent else features\n\n        x = torch.cat((self.preference, 
temp_features), dim=0)\n        x = F.normalize(x)\n\n        h = F.leaky_relu(self.conv_embed_1(x, self.edge_index))  # equation 1\n        x_hat = F.leaky_relu(self.linear_layer1(x)) + id_embedding if self.has_id else F.leaky_relu(\n            self.linear_layer1(x))  # equation 5\n        x = F.leaky_relu(self.g_layer1(torch.cat((h, x_hat), dim=1))) if self.concate else F.leaky_relu(\n            self.g_layer1(h) + x_hat)\n\n        h = F.leaky_relu(self.conv_embed_2(x, self.edge_index))  # equation 1\n        x_hat = F.leaky_relu(self.linear_layer2(x)) + id_embedding if self.has_id else F.leaky_relu(\n            self.linear_layer2(x))  # equation 5\n        x = F.leaky_relu(self.g_layer2(torch.cat((h, x_hat), dim=1))) if self.concate else F.leaky_relu(\n            self.g_layer2(h) + x_hat)\n\n        h = F.leaky_relu(self.conv_embed_3(x, self.edge_index))  # equation 1\n        x_hat = F.leaky_relu(self.linear_layer3(x)) + id_embedding if self.has_id else F.leaky_relu(\n            self.linear_layer3(x))  # equation 5\n        x = F.leaky_relu(self.g_layer3(torch.cat((h, x_hat), dim=1))) if self.concate else F.leaky_relu(\n            self.g_layer3(h) + x_hat)\n\n        return x\n\n\nclass BaseModel(MessagePassing):\n    def __init__(self, in_channels, out_channels, normalize=True, bias=True, aggr='add', **kwargs):\n        super(BaseModel, self).__init__(aggr=aggr, **kwargs)\n        self.aggr = aggr\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.normalize = normalize\n        self.weight = nn.Parameter(torch.Tensor(self.in_channels, out_channels))\n\n        self.reset_parameters()\n\n    def reset_parameters(self):\n        torch_geometric.nn.inits.uniform(self.in_channels, self.weight)\n\n    def forward(self, x, edge_index, size=None):\n        x = torch.matmul(x, self.weight)\n        return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)\n\n    def message(self, x_j, edge_index, size):\n    
    return x_j\n\n    def update(self, aggr_out):\n        return aggr_out\n\n    def __repr(self):\n        return '{}({},{})'.format(self.__class__.__name__, self.in_channels, self.out_channels)"
  },
  {
    "path": "src/models/mvgae.py",
    "content": "# coding: utf-8\n\"\"\"\nhttps://github.com/jing-1/MVGAE\nPaper: Multi-Modal Variational Graph Auto-Encoder for Recommendation Systems\nIEEE TMM'21\n\"\"\"\n\nimport os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Parameter\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.utils import remove_self_loops, add_self_loops, degree\nfrom torch_geometric.nn.inits import uniform\nfrom torch.autograd import Variable\n\nfrom common.abstract_recommender import GeneralRecommender\nfrom common.loss import BPRLoss, EmbLoss\nfrom common.init import xavier_uniform_initialization\n\nEPS = 1e-15\nMAX_LOGVAR = 10\n\n\nclass MVGAE(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(MVGAE, self).__init__(config, dataset)\n        self.experts = ProductOfExperts()\n        #self.dataset = config['dataset']\n        self.dataset = 'amazon'\n        self.batch_size = config['train_batch_size']\n        self.num_user = self.n_users\n        self.num_item = self.n_items\n        num_user = self.n_users\n        num_item = self.n_items\n        num_layer = config['n_layers']\n        self.aggr_mode = 'mean'\n        self.concate = False\n        self.dim_x = config['embedding_size']\n        self.beta = config['beta']\n        self.collaborative = nn.init.xavier_normal_(torch.rand((num_item, self.dim_x), requires_grad=True)).to(self.device)\n        # packing interaction in training into edge_index\n        train_interactions = dataset.inter_matrix(form='coo').astype(np.float32)\n        edge_index = torch.tensor(self.pack_edge_index(train_interactions), dtype=torch.long)\n        self.edge_index = edge_index.t().contiguous().to(self.device)\n        self.edge_index = torch.cat((self.edge_index, self.edge_index[[1, 0]]), dim=1)\n        if self.v_feat is not None:\n            self.v_gcn = GCN(self.device, self.v_feat, self.edge_index, self.batch_size, 
num_user, num_item, self.dim_x,\n                             self.aggr_mode, self.concate, num_layer=num_layer, dim_latent=128)  # 256)\n        if self.t_feat is not None:\n            self.t_gcn = GCN(self.device, self.t_feat, self.edge_index, self.batch_size, num_user, num_item, self.dim_x,\n                             self.aggr_mode, self.concate, num_layer=num_layer, dim_latent=128)  # 256)\n        self.c_gcn = GCN(self.device, self.collaborative, self.edge_index, self.batch_size, num_user, num_item,\n                         self.dim_x,\n                         self.aggr_mode, self.concate, num_layer=num_layer, dim_latent=128)  # 256)\n        self.result_embed = nn.init.xavier_normal_(torch.rand((num_user + num_item, self.dim_x))).to(self.device)\n\n    def pack_edge_index(self, inter_mat):\n        rows = inter_mat.row\n        cols = inter_mat.col + self.n_users\n        # ndarray([598918, 2]) for ml-imdb\n        return np.column_stack((rows, cols))\n\n    def reparametrize(self, mu, logvar):\n        logvar = logvar.clamp(max=MAX_LOGVAR)\n        if self.training:\n            return mu + torch.randn_like(logvar) * 0.1 * torch.exp(logvar.mul(0.5))\n        else:\n            return mu\n\n    def dot_product_decode_neg(self, z, user, neg_items, sigmoid=True):\n        # multiple negs, for comparison with MAML\n        # print('user shape: ',user,user.shape)\n        users = torch.unsqueeze(user, 1)\n        # print('users shape: ', users,users.shape)\n        neg_items = neg_items\n        # print('neg_items: ', neg_items,neg_items.shape)\n        # print('neg_items.size(1):', neg_items.size(0))\n        re_users = users.repeat(1, neg_items.size(0))\n\n        neg_values = torch.sum(z[re_users] * z[neg_items], -1)\n        max_neg_value = torch.max(neg_values, dim=-1).values\n        return torch.sigmoid(max_neg_value) if sigmoid else max_neg_value\n\n    def dot_product_decode(self, z, edge_index, sigmoid=True):\n        value = 
torch.sum(z[edge_index[0]] * z[edge_index[1]], dim=1)\n        return torch.sigmoid(value) if sigmoid else value\n\n    def forward(self):\n        v_mu, v_logvar = self.v_gcn()\n        t_mu, t_logvar = self.t_gcn()\n        c_mu, c_logvar = self.c_gcn()\n        self.v_logvar = v_logvar\n        self.t_logvar = t_logvar\n        self.v_mu = v_mu\n        self.t_mu = t_mu\n        mu = torch.stack([v_mu, t_mu], dim=0)\n        logvar = torch.stack([v_logvar, t_logvar], dim=0)\n\n        pd_mu, pd_logvar, _ = self.experts(mu, logvar)\n        del mu\n        del logvar\n\n        mu = torch.stack([pd_mu, c_mu], dim=0)\n        logvar = torch.stack([pd_logvar, c_logvar], dim=0)\n\n        pd_mu, pd_logvar, _ = self.experts(mu, logvar)\n        del mu\n        del logvar\n        z = self.reparametrize(pd_mu, pd_logvar)\n\n        # for more sparse dataset like amazon, use signoid to regulization. for alishop,dont use sigmoid for better results\n        if 'amazon' in self.dataset:\n            self.result_embed = torch.sigmoid(pd_mu)\n        else:\n            self.result_embed = pd_mu\n        return pd_mu, pd_logvar, z, v_mu, v_logvar, t_mu, t_logvar, c_mu, c_logvar\n\n    def recon_loss(self, z, pos_edge_index, user, neg_items):\n        r\"\"\"Given latent variables :obj:`z`, computes the binary cross\n        entropy loss for positive edges :obj:`pos_edge_index` and negative\n        sampled edges.\n        Args:\n            z (Tensor): The latent space :math:`\\mathbf{Z}`.\n            pos_edge_index (LongTensor): The positive edges to train against.\n        \"\"\"\n        # for more sparse dataset like amazon, use signoid to regulization. 
for alishop,dont use sigmoid for better results\n        if 'amazon' in self.dataset:\n            z = torch.sigmoid(z)\n\n        pos_scores = self.dot_product_decode(z, pos_edge_index, sigmoid=True)\n        neg_scores = self.dot_product_decode_neg(z, user, neg_items, sigmoid=True)\n        loss = -torch.sum(torch.log2(torch.sigmoid(pos_scores - neg_scores)))\n        return loss\n\n    def kl_loss(self, mu, logvar):\n        r\"\"\"Computes the KL loss, either for the passed arguments :obj:`mu`\n        and :obj:`logvar`, or based on latent variables from last encoding.\n        Args:\n            mu (Tensor, optional): The latent space for :math:`\\mu`. If set to\n                :obj:`None`, uses the last computation of :math:`mu`.\n                (default: :obj:`None`)\n            logvar (Tensor, optional): The latent space for\n                :math:`\\log\\sigma^2`.  If set to :obj:`None`, uses the last\n                computation of :math:`\\log\\sigma^2`.(default: :obj:`None`)\n        \"\"\"\n        logvar = logvar.clamp(max=MAX_LOGVAR)\n        return -0.5 * torch.mean(\n            torch.sum(1 + logvar - mu ** 2 - logvar.exp(), dim=1))\n\n    def calculate_loss(self, interaction):\n        user = interaction[0]\n        pos_items = interaction[1]\n        neg_items = interaction[2]\n        #user = user.long()\n        #pos_items = pos_items.long()\n        #neg_items = torch.tensor(neg_items, dtype=torch.long)\n        pos_edge_index = torch.stack([user, pos_items], dim=0)\n        pd_mu, pd_logvar, z, v_mu, v_logvar, t_mu, t_logvar, c_mu, c_logvar = self.forward()\n\n        z_v = self.reparametrize(v_mu, v_logvar)\n        z_t = self.reparametrize(t_mu, t_logvar)\n        z_c = self.reparametrize(c_mu, c_logvar)\n        recon_loss = self.recon_loss(z, pos_edge_index, user, neg_items)\n        kl_loss = self.kl_loss(pd_mu, pd_logvar)\n        loss_multi = recon_loss + self.beta * kl_loss\n        loss_v = self.recon_loss(z_v, pos_edge_index, 
user, neg_items) + self.beta * self.kl_loss(v_mu, v_logvar)\n        loss_t = self.recon_loss(z_t, pos_edge_index, user, neg_items) + self.beta * self.kl_loss(t_mu, t_logvar)\n        loss_c = self.recon_loss(z_c, pos_edge_index, user, neg_items) + self.beta* self.kl_loss(c_mu, c_logvar)\n        return loss_multi + loss_v + loss_t + loss_c\n\n    def full_sort_predict(self, interaction):\n        user_tensor = self.result_embed[:self.n_users]\n        item_tensor = self.result_embed[self.n_users:]\n\n        temp_user_tensor = user_tensor[interaction[0], :]\n        score_matrix = torch.matmul(temp_user_tensor, item_tensor.t())\n        return score_matrix\n\n\nclass GCN(torch.nn.Module):\n    def __init__(self, device, features, edge_index, batch_size, num_user, num_item, dim_id, aggr_mode, concate,\n                 num_layer, dim_latent=None):\n        super(GCN, self).__init__()\n        self.device = device\n        self.batch_size = batch_size\n        self.num_user = num_user\n        self.num_item = num_item\n        self.dim_id = dim_id\n        self.dim_feat = features.size(1)\n        self.dim_latent = dim_latent\n        self.edge_index = edge_index\n        self.features = features\n        self.aggr_mode = aggr_mode\n        self.concate = concate\n        self.num_layer = num_layer\n\n        if self.dim_latent:\n            self.preference = nn.init.xavier_normal_(torch.rand((num_user, self.dim_latent), requires_grad=True)).to(\n                self.device)\n            self.MLP = nn.Linear(self.dim_feat, self.dim_latent)\n            nn.init.xavier_normal_(self.MLP.weight)\n            self.conv_embed_1 = BaseModel(self.dim_latent, self.dim_id, aggr=self.aggr_mode)\n            nn.init.xavier_normal_(self.conv_embed_1.weight)\n            self.linear_layer1 = nn.Linear(self.dim_latent, self.dim_id)\n            nn.init.xavier_normal_(self.linear_layer1.weight)\n            self.g_layer1 = nn.Linear(self.dim_id + self.dim_id, self.dim_id) if 
self.concate else nn.Linear(\n                self.dim_id, self.dim_id)\n            nn.init.xavier_normal_(self.g_layer1.weight)\n\n        else:\n            self.preference = nn.init.xavier_normal_(torch.rand((num_user, self.dim_feat), requires_grad=True)).to(\n                self.device)\n            self.conv_embed_1 = BaseModel(self.dim_feat, self.dim_id, aggr=self.aggr_mode)\n            nn.init.xavier_normal_(self.conv_embed_1.weight)\n            self.linear_layer1 = nn.Linear(self.dim_feat, self.dim_id)\n            nn.init.xavier_normal_(self.linear_layer1.weight)\n            self.g_layer1 = nn.Linear(self.dim_feat + self.dim_id, self.dim_id) if self.concate else nn.Linear(\n                self.dim_id, self.dim_id)\n            nn.init.xavier_normal_(self.g_layer1.weight)\n\n        self.conv_embed_2 = BaseModel(self.dim_id, self.dim_id, aggr=self.aggr_mode)\n        nn.init.xavier_normal_(self.conv_embed_2.weight)\n        self.linear_layer2 = nn.Linear(self.dim_id, self.dim_id)\n        nn.init.xavier_normal_(self.linear_layer2.weight)\n        self.g_layer2 = nn.Linear(self.dim_id + self.dim_id, self.dim_id) if self.concate else nn.Linear(self.dim_id,\n                                                                                                         self.dim_id)\n        # nn.init.xavier_normal_(self.g_layer2.weight)\n\n        self.conv_embed_4 = BaseModel(self.dim_id, self.dim_id, aggr=self.aggr_mode)\n        nn.init.xavier_normal_(self.conv_embed_4.weight)\n        self.linear_layer4 = nn.Linear(self.dim_id, self.dim_id)\n        nn.init.xavier_normal_(self.linear_layer4.weight)\n        self.g_layer4 = nn.Linear(self.dim_id + self.dim_id, self.dim_id) if self.concate else nn.Linear(self.dim_id,\n                                                                                                         self.dim_id)\n        nn.init.xavier_normal_(self.g_layer4.weight)\n        self.conv_embed_5 = BaseModel(self.dim_id, self.dim_id, 
aggr=self.aggr_mode)\n        nn.init.xavier_normal_(self.conv_embed_5.weight)\n        self.linear_layer5 = nn.Linear(self.dim_id, self.dim_id)\n        nn.init.xavier_normal_(self.linear_layer5.weight)\n        self.g_layer5 = nn.Linear(self.dim_id + self.dim_id, self.dim_id) if self.concate else nn.Linear(self.dim_id,\n                                                                                                         self.dim_id)\n        nn.init.xavier_normal_(self.g_layer5.weight)\n\n    def forward(self):\n        # print(self.features)\n        # print(self.MLP.weight)\n        temp_features = self.MLP(self.features) if self.dim_latent else self.features\n        # print('temp feature: ',temp_features)\n        x = torch.cat((self.preference, temp_features), dim=0)\n        # print(x)\n        x = F.normalize(x).to(self.device)\n        # print(x)\n\n        if self.num_layer > 0:\n            h = F.leaky_relu(self.conv_embed_1(x, self.edge_index))\n            x_hat = F.leaky_relu(self.linear_layer1(x))\n            x = F.leaky_relu(self.g_layer1(torch.cat((h, x_hat), dim=1))) if self.concate else F.leaky_relu(\n                self.g_layer1(h))\n            del x_hat\n            del h\n\n        if self.num_layer > 1:\n            h = F.leaky_relu(self.conv_embed_2(x, self.edge_index))\n            x_hat = F.leaky_relu(self.linear_layer2(x))\n            x = F.leaky_relu(self.g_layer2(torch.cat((h, x_hat), dim=1))) if self.concate else F.leaky_relu(\n                self.g_layer2(h))\n            del h\n            del x_hat\n\n        mu = F.leaky_relu(self.conv_embed_4(x, self.edge_index))\n        x_hat = F.leaky_relu(self.linear_layer4(x))\n        mu = self.g_layer4(torch.cat((mu, x_hat), dim=1)) if self.concate else self.g_layer4(mu) + x_hat\n        del x_hat\n\n        logvar = F.leaky_relu(self.conv_embed_5(x, self.edge_index))\n        x_hat = F.leaky_relu(self.linear_layer5(x))\n        logvar = self.g_layer5(torch.cat((logvar, x_hat), 
dim=1)) if self.concate else self.g_layer5(logvar) + x_hat\n        del x_hat\n        return mu, logvar\n\n\nclass ProductOfExperts(torch.nn.Module):\n    def __init__(self):\n        super(ProductOfExperts, self).__init__()\n        \"\"\"Return parameters for product of independent experts.\n        See https://arxiv.org/pdf/1410.7827.pdf for equations.\n        @param mu: M x D for M experts\n        @param logvar: M x D for M experts\n        \"\"\"\n\n    def forward(self, mu, logvar, eps=1e-8):\n        var = torch.exp(logvar) + eps\n        # precision of i-th Gaussian expert at point x\n        T = 1. / var\n        pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)\n        pd_var = 1. / torch.sum(T, dim=0)\n        pd_logvar = torch.log(pd_var)\n        return pd_mu, pd_logvar, pd_var\n\n\nclass BaseModel(MessagePassing):\n    def __init__(self, in_channels, out_channels, normalize=True, bias=True, aggr='add', **kwargs):\n        super(BaseModel, self).__init__(aggr=aggr, **kwargs)\n        self.aggr = aggr\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.normalize = normalize\n        self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))\n        if bias:\n            self.bias = Parameter(torch.Tensor(out_channels))\n        else:\n            self.register_parameter('bias', None)\n        self.reset_parameters()\n\n    def reset_parameters(self):\n        uniform(self.in_channels, self.weight)\n        uniform(self.in_channels, self.bias)\n\n    def forward(self, x, edge_index, size=None):\n        if size is None:\n            edge_index, _ = remove_self_loops(edge_index)\n            edge_index, _ = add_self_loops(edge_index.long(), num_nodes=x.size(0))\n            edge_index = edge_index.long()\n        x = x.unsqueeze(-1) if x.dim() == 1 else x\n        x = torch.matmul(x, self.weight)\n        return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)\n\n    def 
message(self, x_j, edge_index, size):\n        if self.aggr == 'add':\n            row, col = edge_index\n            deg = degree(row, size[0], dtype=x_j.dtype)\n            deg_inv_sqrt = deg.pow(-0.5)\n            norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]\n            return norm.view(-1, 1) * x_j\n        return x_j\n\n    def update(self, aggr_out):\n        if self.bias is not None:\n            aggr_out = aggr_out + self.bias\n        if self.normalize:\n            aggr_out = F.normalize(aggr_out, p=2, dim=-1)\n        return F.dropout(aggr_out, p=0.1, training=self.training)\n\n    def __repr(self):\n        return '{}({},{})'.format(self.__class__.__name__, self.in_channels, self.out_channels)\n"
  },
  {
    "path": "src/models/pgl.py",
    "content": "# coding: utf-8\n# @email: y463213402@gmail.com\nr\"\"\"\nPGL\n################################################\nReference:\n    https://github.com/demonph10/PGL\n    AAAI'2025: [Mind Individual Information! Principal Graph Learning for Multimedia Recommendation]\n\"\"\"\n\nimport os\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom common.abstract_recommender import GeneralRecommender\nfrom sparsesvd import sparsesvd\n\n\nclass PGL(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(PGL, self).__init__(config, dataset)\n        self.mode = config['mode']\n\n        self.embedding_dim = config['embedding_size']\n        self.feat_embed_dim = config['feat_embed_dim']\n        self.knn_k = config['knn_k']\n        self.lambda_coeff = config['lambda_coeff']\n        self.n_layers = config['n_mm_layers']\n        self.n_ui_layers = config['n_ui_layers']\n        self.reg_weight = config['reg_weight']\n        self.mm_image_weight = config['mm_image_weight']\n\n        self.n_nodes = self.n_users + self.n_items\n\n        self.sub_graph, self.mm_adj = None, None\n\n        # load dataset info\n        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32)\n        self.norm_adj = self.get_norm_adj_mat().to(self.device)\n        self.edge_indices, self.edge_values = self.get_edge_info()\n        self.edge_indices, self.edge_values = self.edge_indices.to(self.device), self.edge_values.to(self.device)\n        self.edge_full_indices = torch.arange(self.edge_values.size(0)).to(self.device)\n\n        self.user_text = nn.Embedding(self.n_users, self.embedding_dim)\n        self.user_image = nn.Embedding(self.n_users, self.embedding_dim)\n        nn.init.xavier_uniform_(self.user_image.weight)\n        nn.init.xavier_uniform_(self.user_text.weight)\n\n        dataset_path = os.path.abspath(config['data_path'] + config['dataset'])\n        
mm_adj_file = os.path.join(dataset_path,'mm_adj_freedomdsp_{}_{}.pt'.format(self.knn_k, int(10 * self.mm_image_weight)))\n\n        if self.v_feat is not None:\n            self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=False)\n            self.image_trs = nn.Linear(self.v_feat.shape[1], self.feat_embed_dim)\n        if self.t_feat is not None:\n            self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=False)\n            self.text_trs = nn.Linear(self.t_feat.shape[1], self.feat_embed_dim)\n\n        if os.path.exists(mm_adj_file):\n            self.mm_adj = torch.load(mm_adj_file)\n        else:\n            if self.v_feat is not None:\n                indices, image_adj = self.get_knn_adj_mat(self.image_embedding.weight.detach())\n                self.mm_adj = image_adj\n            if self.t_feat is not None:\n                indices, text_adj = self.get_knn_adj_mat(self.text_embedding.weight.detach())\n                self.mm_adj = text_adj\n            if self.v_feat is not None and self.t_feat is not None:\n                self.mm_adj = self.mm_image_weight * image_adj + (1.0 - self.mm_image_weight) * text_adj\n                del text_adj\n                del image_adj\n            torch.save(self.mm_adj, mm_adj_file)\n        self.dropoutf = nn.Dropout(config['dropout'])\n\n    def sparse_mx_to_torch_sparse_tensor(self, sparse_mx):\n        \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n        sparse_mx = sparse_mx.tocoo().astype(np.float32)\n        indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n        values = torch.from_numpy(sparse_mx.data)\n        shape = torch.Size(sparse_mx.shape)\n        return torch.sparse.FloatTensor(indices, values, shape)\n\n    def get_knn_adj_mat(self, mm_embeddings):\n        context_norm = mm_embeddings.div(torch.norm(mm_embeddings, p=2, dim=-1, keepdim=True))\n        sim = torch.mm(context_norm, 
context_norm.transpose(1, 0))\n        _, knn_ind = torch.topk(sim, self.knn_k, dim=-1)\n        adj_size = sim.size()\n        del sim\n        # construct sparse adj\n        indices0 = torch.arange(knn_ind.shape[0]).to(self.device)\n        indices0 = torch.unsqueeze(indices0, 1)\n        indices0 = indices0.expand(-1, self.knn_k)\n        indices = torch.stack((torch.flatten(indices0), torch.flatten(knn_ind)), 0)\n        # norm\n        return indices, self.compute_normalized_laplacian(indices, adj_size)\n\n    def compute_normalized_laplacian(self, indices, adj_size):\n        adj = torch.sparse.FloatTensor(indices, torch.ones_like(indices[0]), adj_size)\n        row_sum = 1e-7 + torch.sparse.sum(adj, -1).to_dense()\n        r_inv_sqrt = torch.pow(row_sum, -0.5)\n        rows_inv_sqrt = r_inv_sqrt[indices[0]]\n        cols_inv_sqrt = r_inv_sqrt[indices[1]]\n        values = rows_inv_sqrt * cols_inv_sqrt\n        return torch.sparse.FloatTensor(indices, values, adj_size)\n\n    def get_norm_adj_mat(self):\n        A = sp.dok_matrix((self.n_users + self.n_items,\n                           self.n_users + self.n_items), dtype=np.float32)\n        inter_M = self.interaction_matrix\n        inter_M_t = self.interaction_matrix.transpose()\n        data_dict = dict(zip(zip(inter_M.row, inter_M.col + self.n_users),\n                             [1] * inter_M.nnz))\n        data_dict.update(dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col),\n                                  [1] * inter_M_t.nnz)))\n        A._update(data_dict)\n        # norm adj matrix\n        sumArr = (A > 0).sum(axis=1)\n        # add epsilon to avoid Devide by zero Warning\n        diag = np.array(sumArr.flatten())[0] + 1e-7\n        diag = np.power(diag, -0.5)\n        D = sp.diags(diag)\n        L = D * A * D\n        # covert norm_adj matrix to tensor\n        L = sp.coo_matrix(L)\n        row = L.row\n        col = L.col\n        i = torch.LongTensor(np.array([row, col]))\n        
data = torch.FloatTensor(L.data)\n        if self.mode == 'global':\n            self.sub_graph = self.global_subgraph_extraction(L)\n            self.sub_graph = self.sparse_mx_to_torch_sparse_tensor(self.sub_graph).to(self.device)\n\n        return torch.sparse.FloatTensor(i, data, torch.Size((self.n_nodes, self.n_nodes)))\n\n    def global_subgraph_extraction(self, adj):\n        norm_adj = adj.tocsc()\n        ut, s, vt = sparsesvd(norm_adj, self.embedding_dim)\n\n        # Get the top and bottom 25% of singular values\n        num_top_bottom = int(0.25 * self.embedding_dim)\n        top_singular_values = s[:num_top_bottom]\n        bottom_singular_values = s[-num_top_bottom:]\n\n        # Compute the product of the top and bottom singular values\n        product_singular_values = top_singular_values * bottom_singular_values\n\n        # Construct the sparse matrix from the product of singular values\n        product_matrix = np.diag(product_singular_values)\n        product_sparse_matrix = ut.T[:, :num_top_bottom] @ product_matrix @ vt[:num_top_bottom, :]\n        product_sparse_matrix = sp.csr_matrix(product_sparse_matrix * (abs(product_sparse_matrix) >= 1e-3))\n        return product_sparse_matrix\n\n    def alignment(self, x, y):\n        user, item = self.interaction_matrix.nonzero()\n        x, y = F.normalize(x, dim=-1), F.normalize(y, dim=-1)\n        return (x[user] - y[item]).norm(p=2, dim=1).pow(2).mean()\n\n    def uniformity(self, x, t=2):\n        x = F.normalize(x, dim=-1)\n        return torch.pdist(x, p=2).pow(2).mul(-t).exp().mean().log()\n\n    def save(self):\n        pass\n\n    def pre_epoch_processing(self):\n        if self.mode == 'local':\n            # degree-sensitive edge pruning\n            degree_len = int(self.edge_values.size(0) * 0.3)\n            degree_idx = torch.multinomial(self.edge_values, degree_len)\n            # random sample\n            keep_indices = self.edge_indices[:, degree_idx]\n            # norm values\n    
        keep_values = self._normalize_adj_m(keep_indices, torch.Size((self.n_users, self.n_items)))\n            all_values = torch.cat((keep_values, keep_values))\n            # update keep_indices to users/items+self.n_users\n            keep_indices[1] += self.n_users\n            all_indices = torch.cat((keep_indices, torch.flip(keep_indices, [0])), 1)\n            self.sub_graph = torch.sparse.FloatTensor(all_indices, all_values, self.norm_adj.shape).to(self.device)\n\n    def _normalize_adj_m(self, indices, adj_size):\n        adj = torch.sparse.FloatTensor(indices, torch.ones_like(indices[0]), adj_size)\n        row_sum = 1e-7 + torch.sparse.sum(adj, -1).to_dense()\n        col_sum = 1e-7 + torch.sparse.sum(adj.t(), -1).to_dense()\n        r_inv_sqrt = torch.pow(row_sum, -0.5)\n        rows_inv_sqrt = r_inv_sqrt[indices[0]]\n        c_inv_sqrt = torch.pow(col_sum, -0.5)\n        cols_inv_sqrt = c_inv_sqrt[indices[1]]\n        values = rows_inv_sqrt * cols_inv_sqrt\n        return values\n\n    def get_edge_info(self):\n        rows = torch.from_numpy(self.interaction_matrix.row)\n        cols = torch.from_numpy(self.interaction_matrix.col)\n        edges = torch.stack([rows, cols]).type(torch.LongTensor)\n        # edge normalized values\n        values = self._normalize_adj_m(edges, torch.Size((self.n_users, self.n_items)))\n        return edges, values\n\n    def forward(self, adj):\n        if self.v_feat is not None:\n            image_feats = self.image_trs(self.image_embedding.weight)\n        if self.t_feat is not None:\n            text_feats = self.text_trs(self.text_embedding.weight)\n\n        image_feats, text_feats = F.normalize(image_feats), F.normalize(text_feats)\n        user_embeds = torch.cat([self.user_image.weight, self.user_text.weight], dim=1)\n        item_embeds = torch.cat([image_feats, text_feats], dim=1)\n\n        h = item_embeds\n        for i in range(self.n_layers):\n            h = torch.sparse.mm(self.mm_adj, h)\n\n        
ego_embeddings = torch.cat((user_embeds, item_embeds), dim=0)\n        all_embeddings = [ego_embeddings]\n        for i in range(self.n_ui_layers):\n            side_embeddings = torch.sparse.mm(adj, ego_embeddings)\n            ego_embeddings = side_embeddings\n            all_embeddings += [ego_embeddings]\n        all_embeddings = torch.stack(all_embeddings, dim=1)\n        all_embeddings = all_embeddings.mean(dim=1, keepdim=False)\n        u_g_embeddings, i_g_embeddings = torch.split(all_embeddings, [self.n_users, self.n_items], dim=0)\n        return u_g_embeddings, i_g_embeddings + h\n\n    def bpr_loss(self, users, pos_items, neg_items):\n        pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)\n        neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)\n\n        maxi = F.logsigmoid(pos_scores - neg_scores)\n        mf_loss = -torch.mean(maxi)\n\n        return mf_loss\n\n    def InfoNCE(self, view1, view2, temperature):\n        view1, view2 = F.normalize(view1, dim=1), F.normalize(view2, dim=1)\n        pos_score = (view1 * view2).sum(dim=-1)\n        pos_score = torch.exp(pos_score / temperature)\n        ttl_score = torch.matmul(view1, view2.transpose(0, 1))\n        ttl_score = torch.exp(ttl_score / temperature).sum(dim=1)\n        cl_loss = -torch.log(pos_score / ttl_score)\n        return torch.mean(cl_loss)\n\n    def calculate_loss(self, interaction):\n        users = interaction[0]\n        pos_items = interaction[1]\n        neg_items = interaction[2]\n\n        ua_embeddings, ia_embeddings = self.forward(self.sub_graph)\n\n        u_g_embeddings = ua_embeddings[users]\n        pos_i_g_embeddings = ia_embeddings[pos_items]\n        neg_i_g_embeddings = ia_embeddings[neg_items]\n\n        batch_mf_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings, neg_i_g_embeddings)\n        cl_loss = (self.InfoNCE(self.dropoutf(u_g_embeddings), self.dropoutf(u_g_embeddings), 0.2)\n                   + 
self.InfoNCE(self.dropoutf(pos_i_g_embeddings), self.dropoutf(pos_i_g_embeddings), 0.2)) / 2\n        return batch_mf_loss + self.reg_weight * cl_loss\n\n    def full_sort_predict(self, interaction):\n        user = interaction[0]\n\n        restore_user_e, restore_item_e = self.forward(self.norm_adj)\n        u_embeddings = restore_user_e[user]\n\n        # dot with all item embedding to accelerate\n        scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1))\n        return scores\n"
  },
  {
    "path": "src/models/selfcfed_lgn.py",
    "content": "# -*- coding: utf-8 -*-\n# @Time   : 2021/05/17\n# @Author : Zhou xin\n# @Email  : enoche.chow@gmail.com\n\nr\"\"\"\n################################################\nSelf-supervised CF\n\nUsing the same implementation of LightGCN in BUIR\nAdding regularization on embeddings\n\n\nSELFCF_{ed}: embedding dropout\n\"\"\"\n\nimport scipy.sparse as sp\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom common.encoders import LightGCN_Encoder\nfrom common.abstract_recommender import GeneralRecommender\nfrom common.loss import BPRLoss, EmbLoss, L2Loss\n\n\nclass SELFCFED_LGN(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(SELFCFED_LGN, self).__init__(config, dataset)\n        self.user_count = self.n_users\n        self.item_count = self.n_items\n        self.latent_size = config['embedding_size']\n        self.dropout = config['dropout']\n        self.reg_weight = config['reg_weight']\n\n        self.online_encoder = LightGCN_Encoder(config, dataset)\n        self.predictor = nn.Linear(self.latent_size, self.latent_size)\n        self.reg_loss = L2Loss()\n\n    def forward(self, inputs):\n        u_online, i_online = self.online_encoder(inputs)\n        with torch.no_grad():\n            u_target, i_target = u_online.clone(), i_online.clone()\n            u_target.detach()\n            i_target.detach()\n            u_target = F.dropout(u_target, self.dropout)\n            i_target = F.dropout(i_target, self.dropout)\n\n        return u_online, u_target, i_online, i_target\n\n    @torch.no_grad()\n    def get_embedding(self):\n        u_online, i_online = self.online_encoder.get_embedding()\n        return self.predictor(u_online), u_online, self.predictor(i_online), i_online\n\n    def loss_fn(self, p, z):  # negative cosine similarity\n        return - F.cosine_similarity(p, z.detach(), dim=-1).mean()\n\n    def calculate_loss(self, interaction):\n        u_online, u_target, 
i_online, i_target = self.forward(interaction)\n        reg_loss = self.reg_loss(u_online, i_online)\n\n        u_online, i_online = self.predictor(u_online), self.predictor(i_online)\n\n        loss_ui = self.loss_fn(u_online, i_target)/2\n        loss_iu = self.loss_fn(i_online, u_target)/2\n\n        return loss_ui + loss_iu + self.reg_weight * reg_loss\n\n    def full_sort_predict(self, interaction):\n        user = interaction[0]\n        u_online, u_target, i_online, i_target = self.get_embedding()\n        score_mat_ui = torch.matmul(u_online[user], i_target.transpose(0, 1))\n        score_mat_iu = torch.matmul(u_target[user], i_online.transpose(0, 1))\n        scores = score_mat_ui + score_mat_iu\n\n        return scores\n"
  },
  {
    "path": "src/models/slmrec.py",
    "content": "# coding: utf-8\n#\n# Updated by enoche\n# Paper: Self-supervised Learning for Multimedia Recommendation\n# Github: https://github.com/zltao/SLMRec\n#\n\nimport torch\nfrom torch import nn\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom torch_scatter import scatter\nfrom sklearn.cluster import KMeans\nfrom common.abstract_recommender import GeneralRecommender\n\n## Only visual + text features\n##\n\nclass SLMRec(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(SLMRec, self).__init__(config, dataset)\n        self.a_feat = None      # no audio feature\n        self.config = config\n        self.infonce_criterion = nn.CrossEntropyLoss()\n        self.__init_weight(dataset)\n\n    def __init_weight(self, dataset):\n        self.num_users = self.n_users\n        self.num_items = self.n_items\n        self.latent_dim = self.config['recdim']\n        self.n_layers = self.config['layer_num']\n        self.mm_fusion_mode = self.config['mm_fusion_mode']\n        self.temp = self.config['temp']\n\n        self.create_u_embeding_i()\n\n        self.all_items = self.all_users = None\n\n        train_interactions = dataset.inter_matrix(form='csr').astype(np.float32)\n        coo = self.create_adj_mat(train_interactions).tocoo()\n        indices = torch.LongTensor([coo.row.tolist(), coo.col.tolist()])\n        self.norm_adj = torch.sparse.FloatTensor(indices, torch.FloatTensor(coo.data), coo.shape)\n        self.norm_adj = self.norm_adj.to(self.device)\n        self.f = nn.Sigmoid()\n\n        if self.config[\"ssl_task\"] == \"FAC\":\n            # Fine and Coarse\n            self.g_i_iv = nn.Linear(self.latent_dim, self.latent_dim)\n            self.g_v_iv = nn.Linear(self.latent_dim, self.latent_dim)\n            self.g_iv_iva = nn.Linear(self.latent_dim, self.latent_dim)\n            self.g_a_iva = nn.Linear(self.latent_dim, self.latent_dim)\n            self.g_iva_ivat = nn.Linear(self.latent_dim, self.latent_dim // 2)\n   
         self.g_t_ivat = nn.Linear(self.latent_dim, self.latent_dim // 2)\n            nn.init.xavier_uniform_(self.g_i_iv.weight)\n            nn.init.xavier_uniform_(self.g_v_iv.weight)\n            nn.init.xavier_uniform_(self.g_iv_iva.weight)\n            nn.init.xavier_uniform_(self.g_a_iva.weight)\n            nn.init.xavier_uniform_(self.g_iva_ivat.weight)\n            nn.init.xavier_uniform_(self.g_t_ivat.weight)\n            self.ssl_temp = self.config[\"ssl_temp\"]\n        elif self.config[\"ssl_task\"] in [\"FD\", \"FD+FM\"]:\n            # Feature dropout\n            self.ssl_criterion = nn.CrossEntropyLoss()\n            self.ssl_temp = self.config[\"ssl_temp\"]\n            self.dropout_rate = self.config[\"dropout_rate\"]\n            self.dropout = nn.Dropout(p=self.dropout_rate)\n        elif self.config[\"ssl_task\"] == \"FM\":\n            # Feature Masking\n            self.ssl_criterion = nn.CrossEntropyLoss()\n            self.ssl_temp = self.config[\"ssl_temp\"]\n\n    def compute(self):\n        users_emb = self.embedding_user.weight\n        items_emb = self.embedding_item.weight\n\n        if self.v_feat is not None:\n            self.v_dense_emb = self.v_dense(self.v_feat)  # v=>id\n        if self.config[\"dataset\"] != \"kwai\":\n            if self.a_feat is not None:\n                self.a_dense_emb = self.a_dense(self.a_feat)  # a=>id\n            if self.t_feat is not None:\n                self.t_dense_emb = self.t_dense(self.t_feat)  # t=>id\n\n        def compute_graph(u_emb, i_emb):\n            all_emb = torch.cat([u_emb, i_emb])\n            embs = [all_emb]\n            g_droped = self.norm_adj\n            for _ in range(self.n_layers):\n                all_emb = torch.sparse.mm(g_droped, all_emb)\n                embs.append(all_emb)\n            embs = torch.stack(embs, dim=1)\n            light_out = torch.mean(embs, dim=1)\n            return light_out\n\n        self.i_emb = compute_graph(users_emb, items_emb)\n      
  self.i_emb_u, self.i_emb_i = torch.split(self.i_emb, [self.num_users, self.num_items])\n        self.v_emb = compute_graph(users_emb, self.v_dense_emb)\n        self.v_emb_u, self.v_emb_i = torch.split(self.v_emb, [self.num_users, self.num_items])\n        if self.config[\"dataset\"] != \"kwai\":\n            if self.a_feat is not None:\n                self.a_emb = compute_graph(users_emb, self.a_dense_emb)\n                self.a_emb_u, self.a_emb_i = torch.split(self.a_emb, [self.num_users, self.num_items])\n            if self.t_feat is not None:\n                self.t_emb = compute_graph(users_emb, self.t_dense_emb)\n                self.t_emb_u, self.t_emb_i = torch.split(self.t_emb, [self.num_users, self.num_items])\n\n        # multi - modal features fusion\n        if self.config[\"dataset\"] == \"kwai\":\n            user = self.embedding_user_after_GCN(\n                self.mm_fusion([self.i_emb_u, self.v_emb_u]))\n            item = self.embedding_item_after_GCN(\n                self.mm_fusion([self.i_emb_i, self.v_emb_i]))\n        else:\n            user = self.embedding_user_after_GCN(self.mm_fusion([self.i_emb_u, self.v_emb_u, self.t_emb_u]))\n            item = self.embedding_item_after_GCN(self.mm_fusion([self.i_emb_i, self.v_emb_i, self.t_emb_i]))\n\n        return user, item\n\n    def feature_dropout(self, users_idx, items_idx):\n        users_emb = self.embedding_user.weight\n        items_emb = self.embedding_item.weight\n\n        v_dense = self.v_dense_emb\n        if self.config[\"data.input.dataset\"] != \"kwai\":\n            a_dense = self.a_dense_emb\n            t_dense = self.t_dense_emb\n\n        def compute_graph(u_emb, i_emb):\n            all_emb = torch.cat([u_emb, i_emb])\n            ego_emb_sub_1 = all_emb\n            ego_emb_sub_2 = all_emb\n            # embs = [all_emb]\n            embs_sub_1 = [ego_emb_sub_1]\n            embs_sub_2 = [ego_emb_sub_2]\n\n            g_droped = self.norm_adj\n\n            for _ in 
range(self.n_layers):\n                ego_emb_sub_1 = self.dropout(torch.sparse.mm(g_droped, ego_emb_sub_1))\n                ego_emb_sub_2 = self.dropout(torch.sparse.mm(g_droped, ego_emb_sub_2))\n                embs_sub_2.append(ego_emb_sub_1)\n                embs_sub_1.append(ego_emb_sub_2)\n            embs_sub_1 = torch.stack(embs_sub_1, dim=1)\n            embs_sub_2 = torch.stack(embs_sub_2, dim=1)\n\n            light_out_sub_1 = torch.mean(embs_sub_1, dim=1)\n            light_out_sub_2 = torch.mean(embs_sub_2, dim=1)\n\n            users_sub_1, items_sub_1 = torch.split(light_out_sub_1, [self.num_users, self.num_items])\n            users_sub_2, items_sub_2 = torch.split(light_out_sub_2, [self.num_users, self.num_items])\n            return users_sub_1[users_idx], items_sub_1[items_idx], users_sub_2[users_idx], items_sub_2[items_idx]\n\n        i_emb_u_sub_1, i_emb_i_sub_1, i_emb_u_sub_2, i_emb_i_sub_2 = compute_graph(users_emb, items_emb)\n        v_emb_u_sub_1, v_emb_i_sub_1, v_emb_u_sub_2, v_emb_i_sub_2 = compute_graph(users_emb, v_dense)\n        if self.config[\"data.input.dataset\"] != \"kwai\":\n            a_emb_u_sub_1, a_emb_i_sub_1, a_emb_u_sub_2, a_emb_i_sub_2 = compute_graph(users_emb, a_dense)\n            t_emb_u_sub_1, t_emb_i_sub_1, t_emb_u_sub_2, t_emb_i_sub_2 = compute_graph(users_emb, t_dense)\n\n        if self.config[\"data.input.dataset\"] == \"kwai\":\n            users_sub_1 = self.embedding_user_after_GCN(self.mm_fusion([i_emb_u_sub_1, v_emb_u_sub_1]))\n            items_sub_1 = self.embedding_item_after_GCN(self.mm_fusion([i_emb_i_sub_1, v_emb_i_sub_1]))\n            users_sub_2 = self.embedding_user_after_GCN(self.mm_fusion([i_emb_u_sub_2, v_emb_u_sub_2]))\n            items_sub_2 = self.embedding_item_after_GCN(self.mm_fusion([i_emb_i_sub_2, v_emb_i_sub_2]))\n        else:\n            users_sub_1 = self.embedding_user_after_GCN(\n                self.mm_fusion([i_emb_u_sub_1, v_emb_u_sub_1, a_emb_u_sub_1, 
t_emb_u_sub_1]))\n            items_sub_1 = self.embedding_item_after_GCN(\n                self.mm_fusion([i_emb_i_sub_1, v_emb_i_sub_1, a_emb_i_sub_1, t_emb_i_sub_1]))\n            users_sub_2 = self.embedding_user_after_GCN(\n                self.mm_fusion([i_emb_u_sub_2, v_emb_u_sub_2, a_emb_u_sub_2, t_emb_u_sub_2]))\n            items_sub_2 = self.embedding_item_after_GCN(\n                self.mm_fusion([i_emb_i_sub_2, v_emb_i_sub_2, a_emb_i_sub_2, t_emb_i_sub_2]))\n\n        users_sub_1 = torch.nn.functional.normalize(users_sub_1, dim=1)\n        users_sub_2 = torch.nn.functional.normalize(users_sub_2, dim=1)\n        items_sub_1 = torch.nn.functional.normalize(items_sub_1, dim=1)\n        items_sub_2 = torch.nn.functional.normalize(items_sub_2, dim=1)\n\n        logits_user = torch.mm(users_sub_1, users_sub_2.T)\n        logits_user /= self.ssl_temp\n        labels_user = torch.tensor(list(range(users_sub_2.shape[0]))).to(self.device)\n        ssl_loss_user = self.ssl_criterion(logits_user, labels_user)\n\n        logits_item = torch.mm(items_sub_1, items_sub_2.T)\n        logits_item /= self.ssl_temp\n        labels_item = torch.tensor(list(range(items_sub_2.shape[0]))).to(self.device)\n        ssl_loss_item = self.ssl_criterion(logits_item, labels_item)\n\n        return ssl_loss_user + ssl_loss_item\n\n    def feature_masking(self, users_idx, items_idx, dropout=False):\n        users_emb = self.embedding_user.weight\n        items_emb = self.embedding_item.weight\n\n        rand_range = 4 if self.config[\"data.input.dataset\"] != \"kwai\" else 2\n        rand_idx1 = np.random.randint(rand_range)\n        rand_idx2 = 0\n        while True:\n            rand_idx2 = np.random.randint(rand_range)\n            if rand_idx2 != rand_idx1:\n                break\n\n        v_dense = self.v_dense_emb\n        if self.config[\"data.input.dataset\"] != \"kwai\":\n            a_dense = self.a_dense_emb\n            t_dense = self.t_dense_emb\n\n        def 
compute_graph(u_emb, i_emb, idx):\n            all_emb_1 = torch.cat([u_emb,\n                                   i_emb if rand_idx1 != idx else torch.zeros((self.num_items, self.latent_dim)).to(\n                                       self.device)])\n            all_emb_2 = torch.cat([u_emb,\n                                   i_emb if rand_idx2 != idx else torch.zeros((self.num_items, self.latent_dim)).to(\n                                       self.device)])\n            ego_emb_sub_1 = all_emb_1\n            ego_emb_sub_2 = all_emb_2\n            embs_sub_1 = [ego_emb_sub_1]\n            embs_sub_2 = [ego_emb_sub_2]\n            g_droped = self.norm_adj\n\n            for _ in range(self.n_layers):\n                ego_emb_sub_1 = torch.sparse.mm(g_droped, ego_emb_sub_1)\n                ego_emb_sub_2 = torch.sparse.mm(g_droped, ego_emb_sub_2)\n                if dropout:\n                    ego_emb_sub_1 = self.dropout(ego_emb_sub_1)\n                    ego_emb_sub_2 = self.dropout(ego_emb_sub_2)\n                embs_sub_2.append(ego_emb_sub_1)\n                embs_sub_1.append(ego_emb_sub_2)\n            embs_sub_1 = torch.stack(embs_sub_1, dim=1)\n            embs_sub_2 = torch.stack(embs_sub_2, dim=1)\n\n            light_out_sub_1 = torch.mean(embs_sub_1, dim=1)\n            light_out_sub_2 = torch.mean(embs_sub_2, dim=1)\n\n            users_sub_1, items_sub_1 = torch.split(light_out_sub_1, [self.num_users, self.num_items])\n            users_sub_2, items_sub_2 = torch.split(light_out_sub_2, [self.num_users, self.num_items])\n            return users_sub_1[users_idx], items_sub_1[items_idx], users_sub_2[users_idx], items_sub_2[items_idx]\n\n        i_emb_u_sub_1, i_emb_i_sub_1, i_emb_u_sub_2, i_emb_i_sub_2 = compute_graph(users_emb, items_emb, idx=3)\n        v_emb_u_sub_1, v_emb_i_sub_1, v_emb_u_sub_2, v_emb_i_sub_2 = compute_graph(users_emb, v_dense, idx=0)\n        if self.config[\"data.input.dataset\"] != \"kwai\":\n            a_emb_u_sub_1, 
a_emb_i_sub_1, a_emb_u_sub_2, a_emb_i_sub_2 = compute_graph(users_emb, a_dense, idx=1)\n            t_emb_u_sub_1, t_emb_i_sub_1, t_emb_u_sub_2, t_emb_i_sub_2 = compute_graph(users_emb, t_dense, idx=2)\n\n        if self.config[\"data.input.dataset\"] == \"kwai\":\n            users_sub_1 = self.embedding_user_after_GCN(self.mm_fusion([i_emb_u_sub_1, v_emb_u_sub_1]))\n            items_sub_1 = self.embedding_item_after_GCN(self.mm_fusion([i_emb_i_sub_1, v_emb_i_sub_1]))\n            users_sub_2 = self.embedding_user_after_GCN(self.mm_fusion([i_emb_u_sub_2, v_emb_u_sub_2]))\n            items_sub_2 = self.embedding_item_after_GCN(self.mm_fusion([i_emb_i_sub_2, v_emb_i_sub_2]))\n        else:\n            users_sub_1 = self.embedding_user_after_GCN(\n                self.mm_fusion([i_emb_u_sub_1, v_emb_u_sub_1, a_emb_u_sub_1, t_emb_u_sub_1]))\n            items_sub_1 = self.embedding_item_after_GCN(\n                self.mm_fusion([i_emb_i_sub_1, v_emb_i_sub_1, a_emb_i_sub_1, t_emb_i_sub_1]))\n            users_sub_2 = self.embedding_user_after_GCN(\n                self.mm_fusion([i_emb_u_sub_2, v_emb_u_sub_2, a_emb_u_sub_2, t_emb_u_sub_2]))\n            items_sub_2 = self.embedding_item_after_GCN(\n                self.mm_fusion([i_emb_i_sub_2, v_emb_i_sub_2, a_emb_i_sub_2, t_emb_i_sub_2]))\n\n        users_sub_1 = torch.nn.functional.normalize(users_sub_1, dim=1)\n        users_sub_2 = torch.nn.functional.normalize(users_sub_2, dim=1)\n        items_sub_1 = torch.nn.functional.normalize(items_sub_1, dim=1)\n        items_sub_2 = torch.nn.functional.normalize(items_sub_2, dim=1)\n\n        logits_user = torch.mm(users_sub_1, users_sub_2.T)\n        logits_user /= self.ssl_temp\n        labels_user = torch.tensor(list(range(users_sub_2.shape[0]))).to(self.device)\n        ssl_loss_user = self.ssl_criterion(logits_user, labels_user)\n\n        logits_item = torch.mm(items_sub_1, items_sub_2.T)\n        logits_item /= self.ssl_temp\n        labels_item = 
torch.tensor(list(range(items_sub_2.shape[0]))).to(self.device)\n        ssl_loss_item = self.ssl_criterion(logits_item, labels_item)\n\n        return ssl_loss_user + ssl_loss_item\n\n    def fac(self, idx):\n        x_i_iv = self.g_i_iv(self.i_emb_i[idx])\n        x_v_iv = self.g_v_iv(self.v_emb_i[idx])\n        v_logits = torch.mm(x_i_iv, x_v_iv.T)\n\n        v_logits /= self.ssl_temp\n        v_labels = torch.tensor(list(range(x_i_iv.shape[0]))).to(self.device)\n        v_loss = self.infonce_criterion(v_logits, v_labels)\n        if self.config[\"dataset\"] != \"kwai\":\n            x_iv_iva = self.g_iv_iva(x_i_iv)\n            # x_a_iva = self.g_a_iva(self.a_emb_i[idx])\n            # a_logits = torch.mm(x_iv_iva, x_a_iva.T)\n            # a_logits /= self.ssl_temp\n            # a_labels = torch.tensor(list(range(x_iv_iva.shape[0]))).to(self.device)\n            # a_loss = self.infonce_criterion(a_logits, a_labels)\n            #\n            x_iva_ivat = self.g_iva_ivat(x_iv_iva)\n            x_t_ivat = self.g_t_ivat(self.t_emb_i[idx])\n\n            t_logits = torch.mm(x_iva_ivat, x_t_ivat.T)\n            t_logits /= self.ssl_temp\n            t_labels = torch.tensor(list(range(x_iva_ivat.shape[0]))).to(self.device)\n            t_loss = self.infonce_criterion(t_logits, t_labels)\n\n            #return v_loss + a_loss + t_loss\n            return v_loss + t_loss\n        else:\n            return v_loss\n\n    def full_sort_predict(self, interaction, candidate_items=None):\n        users = interaction[0]\n        users_emb = self.all_users[users]\n        if candidate_items is None:\n            items_emb = self.all_items\n        else:\n            items_emb = self.all_items[torch.tensor(candidate_items).long().to(self.device)]\n        scores = torch.matmul(users_emb, items_emb.t())\n        return self.f(scores)\n\n    def getEmbedding(self, users, pos_items, neg_items):\n        self.all_users, self.all_items = self.compute()\n        users_emb = 
self.all_users[users]\n        pos_emb = self.all_items[pos_items]\n        users_emb_ego = self.embedding_user(users)\n        pos_emb_ego = self.embedding_item(pos_items)\n\n        if neg_items is None:\n            neg_emb_ego = neg_emb = None\n        else:\n            neg_emb = self.all_items[neg_items]\n            neg_emb_ego = self.embedding_item(neg_items)\n\n        return users_emb, pos_emb, neg_emb, users_emb_ego, pos_emb_ego, neg_emb_ego\n\n    def calculate_loss(self, interaction):\n        # multi-task loss\n        users, pos = interaction[0], interaction[1]\n        main_loss = self.infonce(users, pos)\n        ssl_loss = self.compute_ssl(users, pos)\n        return main_loss + self.config['ssl_alpha'] * ssl_loss\n\n    def ssl_loss(self, users, pos):\n        # compute ssl loss\n        self.getEmbedding(users.long(), pos.long(), None)\n        return self.compute_ssl(users, pos)\n\n    def compute_ssl(self, users, items):\n        if self.config[\"ssl_task\"] == \"FAC\":\n            return self.fac(items)\n        elif self.config[\"ssl_task\"] == \"FD\":\n            return self.feature_dropout(users.long(), items.long())\n        elif self.config[\"ssl_task\"] == \"FM\":\n            return self.feature_masking(users.long(), items.long())\n        elif self.config[\"ssl_task\"] == \"FD+FM\":\n            return self.feature_masking(users.long(), items.long(), dropout=True)\n\n    def forward(self, users, items):\n        all_users, all_items = self.compute()\n        users_emb = all_users[users]\n        items_emb = all_items[items]\n        inner_pro = torch.mul(users_emb, items_emb)\n        gamma = torch.sum(inner_pro, dim=1)\n        return gamma.detach()\n\n    def mm_fusion(self, reps: list):\n        if self.mm_fusion_mode == \"concat\":\n            z = torch.cat(reps, dim=1)\n        elif self.mm_fusion_mode == \"mean\":\n            z = torch.mean(torch.stack(reps), dim=0)\n        return z\n\n    def infonce(self, users, pos):\n   
     (users_emb, pos_emb, neg_emb,\n         userEmb0, posEmb0, negEmb0) = self.getEmbedding(users.long(), pos.long(), None)\n        users_emb = torch.nn.functional.normalize(users_emb, dim=1)\n        pos_emb = torch.nn.functional.normalize(pos_emb, dim=1)\n        logits = torch.mm(users_emb, pos_emb.T)\n        logits /= self.temp\n        labels = torch.tensor(list(range(users_emb.shape[0]))).to(self.device)\n\n        return self.infonce_criterion(logits, labels)\n\n    def create_u_embeding_i(self):\n        self.embedding_user = torch.nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.latent_dim)\n        self.embedding_item = torch.nn.Embedding(num_embeddings=self.num_items, embedding_dim=self.latent_dim)\n\n        if self.config[\"init\"] == \"xavier\":\n            nn.init.xavier_uniform_(self.embedding_user.weight, gain=1)\n            nn.init.xavier_uniform_(self.embedding_item.weight, gain=1)\n        elif self.config[\"init\"] == \"normal\":\n            nn.init.normal_(self.embedding_user.weight, std=0.1)\n            nn.init.normal_(self.embedding_item_ID.weight, std=0.1)\n\n        # load features, updated by enoche\n        mul_modal_cnt = 0\n        if self.v_feat is not None:\n            self.v_feat = torch.nn.functional.normalize(self.v_feat, dim=1)\n            self.v_dense = nn.Linear(self.v_feat.shape[1], self.latent_dim)\n            nn.init.xavier_uniform_(self.v_dense.weight)\n            mul_modal_cnt += 1\n        if self.t_feat is not None:\n            self.t_feat = torch.nn.functional.normalize(self.t_feat, dim=1)\n            self.t_dense = nn.Linear(self.t_feat.shape[1], self.latent_dim)\n            nn.init.xavier_uniform_(self.t_dense.weight)\n            mul_modal_cnt += 1\n            # if self.config[\"dataset\"] != \"kwai\":\n            #     if self.a_feat is not None:\n            #         self.a_feat = torch.nn.functional.normalize(self.a_feat, dim=1)\n            #     if self.config[\"dataset\"] == 
\"tiktok\":\n            #         self.words_tensor = self.dataset.words_tensor.to(self.device)\n            #         self.word_embedding = torch.nn.Embedding(11574, 128).to(self.device)\n            #         torch.nn.init.xavier_normal_(self.word_embedding.weight)\n            #         self.t_feat = scatter(self.word_embedding(self.words_tensor[1]), self.words_tensor[0], reduce='mean',\n            #                               dim=0).to(self.device)\n            #     else:\n            #         self.t_feat = torch.nn.functional.normalize(self.dataset.t_feat.to(self.device).float(), dim=1)\n\n        # visual feature dense\n        # if self.config[\"data.input.dataset\"] != \"kwai\":\n        #     # acoustic feature dense\n        #     self.a_dense = nn.Linear(self.a_feat.shape[1], self.latent_dim)\n        #     # textual feature dense\n        #     self.t_dense = nn.Linear(self.t_feat.shape[1], self.latent_dim)\n\n        self.item_feat_dim = self.latent_dim * (mul_modal_cnt + 1)\n\n        # nn.init.xavier_uniform_(self.v_dense.weight)\n        # if self.config[\"data.input.dataset\"] != \"kwai\":\n        #     nn.init.xavier_uniform_(self.a_dense.weight)\n        #     nn.init.xavier_uniform_(self.t_dense.weight)\n\n        self.embedding_item_after_GCN = nn.Linear(self.item_feat_dim, self.latent_dim)\n        self.embedding_user_after_GCN = nn.Linear(self.item_feat_dim, self.latent_dim)\n        nn.init.xavier_uniform_(self.embedding_item_after_GCN.weight)\n        nn.init.xavier_uniform_(self.embedding_user_after_GCN.weight)\n\n    def create_adj_mat(self, interaction_csr):\n        user_np, item_np = interaction_csr.nonzero()\n        # user_list, item_list = self.dataset.get_train_interactions()\n        # user_np = np.array(user_list, dtype=np.int32)\n        # item_np = np.array(item_list, dtype=np.int32)\n        ratings = np.ones_like(user_np, dtype=np.float32)\n        n_nodes = self.num_users + self.num_items\n        tmp_adj = 
sp.csr_matrix((ratings, (user_np, item_np + self.num_users)), shape=(n_nodes, n_nodes))\n        adj_mat = tmp_adj + tmp_adj.T\n\n        def normalized_adj_single(adj):\n            rowsum = np.array(adj.sum(1))\n            d_inv = np.power(rowsum, -1).flatten()\n            d_inv[np.isinf(d_inv)] = 0.\n            d_mat_inv = sp.diags(d_inv)\n\n            norm_adj = d_mat_inv.dot(adj)\n            print('generate single-normalized adjacency matrix.')\n            return norm_adj.tocoo()\n\n        adj_type = self.config['adj_type']\n        if adj_type == 'plain':\n            adj_matrix = adj_mat\n            print('use the plain adjacency matrix')\n        elif adj_type == 'norm':\n            adj_matrix = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0]))\n            print('use the normalized adjacency matrix')\n        elif adj_type == 'gcmc':\n            adj_matrix = normalized_adj_single(adj_mat)\n            print('use the gcmc adjacency matrix')\n        elif adj_type == 'pre':\n            # pre adjcency matrix\n            rowsum = np.array(adj_mat.sum(1)) + 1e-08    # avoid RuntimeWarning: divide by zero encountered in power\n            d_inv = np.power(rowsum, -0.5).flatten()\n            d_inv[np.isinf(d_inv)] = 0.\n            d_mat_inv = sp.diags(d_inv)\n\n            norm_adj_tmp = d_mat_inv.dot(adj_mat)\n            adj_matrix = norm_adj_tmp.dot(d_mat_inv)\n            print('use the pre adjcency matrix')\n        else:\n            mean_adj = normalized_adj_single(adj_mat)\n            adj_matrix = mean_adj + sp.eye(mean_adj.shape[0])\n            print('use the mean adjacency matrix')\n\n        return adj_matrix\n\n"
  },
  {
    "path": "src/models/smore.py",
    "content": "# coding: utf-8\n# rongqing001@e.ntu.edu.sg\nr\"\"\"\nSMORE - Multi-modal Recommender System\nReference:\n    ACM WSDM 2025: Spectrum-based Modality Representation Fusion Graph Convolutional Network for Multimodal Recommendation\n\nReference Code:\n    https://github.com/kennethorq/SMORE\n\"\"\"\n\nimport os\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport sys\nimport math\nfrom common.abstract_recommender import GeneralRecommender\nfrom utils.utils import build_sim, compute_normalized_laplacian, build_knn_neighbourhood, build_knn_normalized_graph\n\n\nclass SMORE(GeneralRecommender):\n    def __init__(self, config, dataset):\n        super(SMORE, self).__init__(config, dataset)\n        self.sparse = True\n        self.cl_loss = config['cl_loss']\n        self.n_ui_layers = config['n_ui_layers']\n        self.embedding_dim = config['embedding_size']\n        self.n_layers = config['n_layers']\n        self.reg_weight = config['reg_weight']\n        self.image_knn_k = config['image_knn_k']\n        self.text_knn_k = config['text_knn_k']\n        self.dropout_rate = config['dropout_rate']\n        self.dropout = nn.Dropout(p=self.dropout_rate)\n\n        self.interaction_matrix = dataset.inter_matrix(form='coo').astype(np.float32)\n\n        self.user_embedding = nn.Embedding(self.n_users, self.embedding_dim)\n        self.item_id_embedding = nn.Embedding(self.n_items, self.embedding_dim)\n        nn.init.xavier_uniform_(self.user_embedding.weight)\n        nn.init.xavier_uniform_(self.item_id_embedding.weight)\n\n        dataset_path = os.path.abspath(config['data_path'] + config['dataset'])\n        image_adj_file = os.path.join(dataset_path, 'image_adj_{}_{}.pt'.format(self.image_knn_k, self.sparse))\n        text_adj_file = os.path.join(dataset_path, 'text_adj_{}_{}.pt'.format(self.text_knn_k, self.sparse))\n\n        self.norm_adj = self.get_adj_mat()\n        
self.R_sprse_mat = self.R\n        self.R = self.sparse_mx_to_torch_sparse_tensor(self.R).float().to(self.device)\n        self.norm_adj = self.sparse_mx_to_torch_sparse_tensor(self.norm_adj).float().to(self.device)\n\n        if self.v_feat is not None:\n            self.image_embedding = nn.Embedding.from_pretrained(self.v_feat, freeze=False)\n            if os.path.exists(image_adj_file):\n                image_adj = torch.load(image_adj_file)\n            else:\n                image_adj = build_sim(self.image_embedding.weight.detach())\n                image_adj = build_knn_normalized_graph(image_adj, topk=self.image_knn_k, is_sparse=self.sparse,\n                                                       norm_type='sym')\n                torch.save(image_adj, image_adj_file)\n            self.image_original_adj = image_adj.cuda()\n\n        if self.t_feat is not None:\n            self.text_embedding = nn.Embedding.from_pretrained(self.t_feat, freeze=False)\n            if os.path.exists(text_adj_file):\n                text_adj = torch.load(text_adj_file)\n            else:\n                text_adj = build_sim(self.text_embedding.weight.detach())\n                text_adj = build_knn_normalized_graph(text_adj, topk=self.text_knn_k, is_sparse=self.sparse, norm_type='sym')\n                torch.save(text_adj, text_adj_file)\n            self.text_original_adj = text_adj.cuda() \n\n        self.fusion_adj = self.max_pool_fusion()\n\n        if self.v_feat is not None:\n            self.image_trs = nn.Linear(self.v_feat.shape[1], self.embedding_dim)\n        if self.t_feat is not None:\n            self.text_trs = nn.Linear(self.t_feat.shape[1], self.embedding_dim)\n\n        self.softmax = nn.Softmax(dim=-1)\n\n        self.query_v = nn.Sequential(\n            nn.Linear(self.embedding_dim, self.embedding_dim),\n            nn.Tanh(),\n            nn.Linear(self.embedding_dim, self.embedding_dim, bias=False)\n        )\n        self.query_t = nn.Sequential(\n     
       nn.Linear(self.embedding_dim, self.embedding_dim),\n            nn.Tanh(),\n            nn.Linear(self.embedding_dim, self.embedding_dim, bias=False)\n        )\n\n        self.gate_v = nn.Sequential(\n            nn.Linear(self.embedding_dim, self.embedding_dim),\n            nn.Sigmoid()\n        )\n\n        self.gate_t = nn.Sequential(\n            nn.Linear(self.embedding_dim, self.embedding_dim),\n            nn.Sigmoid()\n        )\n\n        self.gate_f = nn.Sequential(\n            nn.Linear(self.embedding_dim, self.embedding_dim),\n            nn.Sigmoid()\n        )\n\n        self.gate_image_prefer = nn.Sequential(\n            nn.Linear(self.embedding_dim, self.embedding_dim),\n            nn.Sigmoid()\n        )\n\n        self.gate_text_prefer = nn.Sequential(\n            nn.Linear(self.embedding_dim, self.embedding_dim),\n            nn.Sigmoid()\n        )\n        self.gate_fusion_prefer = nn.Sequential(\n            nn.Linear(self.embedding_dim, self.embedding_dim),\n            nn.Sigmoid()\n        )\n\n        self.image_complex_weight = nn.Parameter(torch.randn(1, self.embedding_dim // 2 + 1, 2, dtype=torch.float32))\n        self.text_complex_weight = nn.Parameter(torch.randn(1, self.embedding_dim // 2 + 1, 2, dtype=torch.float32))\n        self.fusion_complex_weight = nn.Parameter(torch.randn(1, self.embedding_dim // 2 + 1, 2, dtype=torch.float32))\n        \n\n    def pre_epoch_processing(self):\n        pass\n\n    def max_pool_fusion(self):\n        image_adj = self.image_original_adj.coalesce()\n        text_adj = self.text_original_adj.coalesce()\n\n        image_indices = image_adj.indices().to(self.device)\n        image_values = image_adj.values().to(self.device)\n        text_indices = text_adj.indices().to(self.device)\n        text_values = text_adj.values().to(self.device)\n\n        combined_indices = torch.cat((image_indices, text_indices), dim=1)\n        combined_indices, unique_idx = torch.unique(combined_indices, 
dim=1, return_inverse=True)\n\n        combined_values_image = torch.full((combined_indices.size(1),), float('-inf')).to(self.device)\n        combined_values_text = torch.full((combined_indices.size(1),), float('-inf')).to(self.device)\n\n        combined_values_image[unique_idx[:image_indices.size(1)]] = image_values\n        combined_values_text[unique_idx[image_indices.size(1):]] = text_values\n        combined_values, _ = torch.max(torch.stack((combined_values_image, combined_values_text)), dim=0)\n\n        fusion_adj = torch.sparse.FloatTensor(combined_indices, combined_values, image_adj.size()).coalesce()\n\n        return fusion_adj\n\n    def get_adj_mat(self):\n        adj_mat = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32)\n        adj_mat = adj_mat.tolil()\n        R = self.interaction_matrix.tolil()\n\n        adj_mat[:self.n_users, self.n_users:] = R\n        adj_mat[self.n_users:, :self.n_users] = R.T\n        adj_mat = adj_mat.todok()\n\n        def normalized_adj_single(adj):\n            rowsum = np.array(adj.sum(1))\n\n            d_inv = np.power(rowsum, -0.5).flatten()\n            d_inv[np.isinf(d_inv)] = 0.\n            d_mat_inv = sp.diags(d_inv)\n\n            norm_adj = d_mat_inv.dot(adj_mat)\n            norm_adj = norm_adj.dot(d_mat_inv)\n            return norm_adj.tocoo()\n\n        norm_adj_mat = normalized_adj_single(adj_mat)\n        norm_adj_mat = norm_adj_mat.tolil()\n        self.R = norm_adj_mat[:self.n_users, self.n_users:]\n        return norm_adj_mat.tocsr()\n\n    def sparse_mx_to_torch_sparse_tensor(self, sparse_mx):\n        \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n        sparse_mx = sparse_mx.tocoo().astype(np.float32)\n        indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n        values = torch.from_numpy(sparse_mx.data)\n        shape = torch.Size(sparse_mx.shape)\n        return 
torch.sparse.FloatTensor(indices, values, shape)\n\n    def spectrum_convolution(self, image_embeds, text_embeds):\n        \"\"\"\n        Modality Denoising & Cross-Modality Fusion\n        \"\"\"\n        image_fft = torch.fft.rfft(image_embeds, dim=1, norm='ortho')           \n        text_fft = torch.fft.rfft(text_embeds, dim=1, norm='ortho')\n\n        image_complex_weight = torch.view_as_complex(self.image_complex_weight)   \n        text_complex_weight = torch.view_as_complex(self.text_complex_weight)\n        fusion_complex_weight = torch.view_as_complex(self.fusion_complex_weight)\n\n        #   Uni-modal Denoising\n        image_conv = torch.fft.irfft(image_fft * image_complex_weight, n=image_embeds.shape[1], dim=1, norm='ortho')    \n        text_conv = torch.fft.irfft(text_fft * text_complex_weight, n=text_embeds.shape[1], dim=1, norm='ortho')\n\n        #   Cross-modality fusion\n        fusion_conv = torch.fft.irfft(text_fft * image_fft * fusion_complex_weight, n=text_embeds.shape[1], dim=1, norm='ortho') \n        \n        return image_conv, text_conv, fusion_conv\n    \n    def forward(self, adj, train=False):\n        if self.v_feat is not None:\n            image_feats = self.image_trs(self.image_embedding.weight)\n        if self.t_feat is not None:\n            text_feats = self.text_trs(self.text_embedding.weight)\n\n        #   Spectrum Modality Fusion\n        image_conv, text_conv, fusion_conv = self.spectrum_convolution(image_feats, text_feats)\n        image_item_embeds = torch.multiply(self.item_id_embedding.weight, self.gate_v(image_conv))\n        text_item_embeds = torch.multiply(self.item_id_embedding.weight, self.gate_t(text_conv))\n        fusion_item_embeds = torch.multiply(self.item_id_embedding.weight, self.gate_f(fusion_conv))\n\n        #   User-Item (Behavioral) View\n        item_embeds = self.item_id_embedding.weight\n        user_embeds = self.user_embedding.weight\n        ego_embeddings = torch.cat([user_embeds, 
item_embeds], dim=0)\n        all_embeddings = [ego_embeddings]\n\n        for i in range(self.n_ui_layers):\n            side_embeddings = torch.sparse.mm(adj, ego_embeddings)\n            ego_embeddings = side_embeddings\n            all_embeddings += [ego_embeddings]\n        all_embeddings = torch.stack(all_embeddings, dim=1)\n        all_embeddings = all_embeddings.mean(dim=1, keepdim=False)\n        content_embeds = all_embeddings\n\n        #   Item-Item Modality Specific and Fusion views\n        #   Image-view\n        if self.sparse:\n            for i in range(self.n_layers):\n                image_item_embeds = torch.sparse.mm(self.image_original_adj, image_item_embeds)\n        else:\n            for i in range(self.n_layers):\n                image_item_embeds = torch.mm(self.image_original_adj, image_item_embeds)\n        image_user_embeds = torch.sparse.mm(self.R, image_item_embeds)\n        image_embeds = torch.cat([image_user_embeds, image_item_embeds], dim=0)\n\n        #   Text-view\n        if self.sparse:\n            for i in range(self.n_layers):\n                text_item_embeds = torch.sparse.mm(self.text_original_adj, text_item_embeds)\n        else:\n            for i in range(self.n_layers):\n                text_item_embeds = torch.mm(self.text_original_adj, text_item_embeds)\n        text_user_embeds = torch.sparse.mm(self.R, text_item_embeds)\n        text_embeds = torch.cat([text_user_embeds, text_item_embeds], dim=0)\n\n        #   Fusion-view\n        if self.sparse:\n            for i in range(self.n_layers):\n                fusion_item_embeds = torch.sparse.mm(self.fusion_adj, fusion_item_embeds)\n        else:\n            for i in range(self.n_layers):\n                fusion_item_embeds = torch.mm(self.fusion_adj, fusion_item_embeds)\n        fusion_user_embeds = torch.sparse.mm(self.R, fusion_item_embeds)\n        fusion_embeds = torch.cat([fusion_user_embeds, fusion_item_embeds], dim=0)\n\n        #   Modality-aware 
Preference Module\n        fusion_att_v, fusion_att_t = self.query_v(fusion_embeds), self.query_t(fusion_embeds)\n        fusion_soft_v = self.softmax(fusion_att_v)\n        agg_image_embeds = fusion_soft_v * image_embeds\n\n        fusion_soft_t = self.softmax(fusion_att_t)\n        agg_text_embeds = fusion_soft_t * text_embeds\n\n        image_prefer = self.gate_image_prefer(content_embeds)\n        text_prefer = self.gate_text_prefer(content_embeds)\n        fusion_prefer = self.gate_fusion_prefer(content_embeds)\n        image_prefer, text_prefer, fusion_prefer = self.dropout(image_prefer), self.dropout(text_prefer), self.dropout(fusion_prefer)\n\n        agg_image_embeds = torch.multiply(image_prefer, agg_image_embeds)\n        agg_text_embeds = torch.multiply(text_prefer, agg_text_embeds)\n        fusion_embeds = torch.multiply(fusion_prefer, fusion_embeds)\n\n        side_embeds = torch.mean(torch.stack([agg_image_embeds, agg_text_embeds, fusion_embeds]), dim=0) \n\n        all_embeds = content_embeds + side_embeds\n\n        all_embeddings_users, all_embeddings_items = torch.split(all_embeds, [self.n_users, self.n_items], dim=0)\n\n        if train:\n            return all_embeddings_users, all_embeddings_items, side_embeds, content_embeds\n\n        return all_embeddings_users, all_embeddings_items\n\n    def bpr_loss(self, users, pos_items, neg_items):\n        pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)\n        neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)\n\n        regularizer = 1. / 2 * (users ** 2).sum() + 1. / 2 * (pos_items ** 2).sum() + 1. 
/ 2 * (neg_items ** 2).sum()\n        regularizer = regularizer / self.batch_size\n\n        maxi = F.logsigmoid(pos_scores - neg_scores)\n        mf_loss = -torch.mean(maxi)\n\n        emb_loss = self.reg_weight * regularizer\n        reg_loss = 0.0\n        return mf_loss, emb_loss, reg_loss\n\n    def InfoNCE(self, view1, view2, temperature):\n        view1, view2 = F.normalize(view1, dim=1), F.normalize(view2, dim=1)\n        pos_score = (view1 * view2).sum(dim=-1)\n        pos_score = torch.exp(pos_score / temperature)\n        ttl_score = torch.matmul(view1, view2.transpose(0, 1))\n        ttl_score = torch.exp(ttl_score / temperature).sum(dim=1)\n        cl_loss = -torch.log(pos_score / ttl_score)\n        return torch.mean(cl_loss)\n\n    def calculate_loss(self, interaction):\n        users = interaction[0]\n        pos_items = interaction[1]\n        neg_items = interaction[2]\n\n        ua_embeddings, ia_embeddings, side_embeds, content_embeds = self.forward(\n            self.norm_adj, train=True)\n\n        u_g_embeddings = ua_embeddings[users]\n        pos_i_g_embeddings = ia_embeddings[pos_items]\n        neg_i_g_embeddings = ia_embeddings[neg_items]\n\n        batch_mf_loss, batch_emb_loss, batch_reg_loss = self.bpr_loss(u_g_embeddings, pos_i_g_embeddings,\n                                                                      neg_i_g_embeddings)\n\n        side_embeds_users, side_embeds_items = torch.split(side_embeds, [self.n_users, self.n_items], dim=0)\n        content_embeds_user, content_embeds_items = torch.split(content_embeds, [self.n_users, self.n_items], dim=0)\n        cl_loss = self.InfoNCE(side_embeds_items[pos_items], content_embeds_items[pos_items], 0.2) + self.InfoNCE(\n            side_embeds_users[users], content_embeds_user[users], 0.2)\n\n        return batch_mf_loss + batch_emb_loss + batch_reg_loss + self.cl_loss * cl_loss\n\n    def full_sort_predict(self, interaction):\n        user = interaction[0]\n\n        restore_user_e, 
restore_item_e = self.forward(self.norm_adj)\n        u_embeddings = restore_user_e[user]\n\n        # dot with all item embedding to accelerate\n        scores = torch.matmul(u_embeddings, restore_item_e.transpose(0, 1))\n        return scores"
  },
  {
    "path": "src/models/vbpr.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\nr\"\"\"\nVBPR -- Recommended version\n################################################\nReference:\nVBPR: Visual Bayesian Personalized Ranking from Implicit Feedback -Ruining He, Julian McAuley. AAAI'16\n\"\"\"\nimport numpy as np\nimport os\nimport torch\nimport torch.nn as nn\n\nfrom common.abstract_recommender import GeneralRecommender\nfrom common.loss import BPRLoss, EmbLoss\nfrom common.init import xavier_normal_initialization\nimport torch.nn.functional as F\n\n\nclass VBPR(GeneralRecommender):\n    r\"\"\"BPR is a basic matrix factorization model that be trained in the pairwise way.\n    \"\"\"\n    def __init__(self, config, dataloader):\n        super(VBPR, self).__init__(config, dataloader)\n\n        # load parameters info\n        self.u_embedding_size = self.i_embedding_size = config['embedding_size']\n        self.reg_weight = config['reg_weight']  # float32 type: the weight decay for l2 normalizaton\n\n        # define layers and loss\n        self.u_embedding = nn.Parameter(nn.init.xavier_uniform_(torch.empty(self.n_users, self.u_embedding_size * 2)))\n        self.i_embedding = nn.Parameter(nn.init.xavier_uniform_(torch.empty(self.n_items, self.i_embedding_size)))\n        if self.v_feat is not None and self.t_feat is not None:\n            self.item_raw_features = torch.cat((self.t_feat, self.v_feat), -1)\n        elif self.v_feat is not None:\n            self.item_raw_features = self.v_feat\n        else:\n            self.item_raw_features = self.t_feat\n\n        self.item_linear = nn.Linear(self.item_raw_features.shape[1], self.i_embedding_size)\n        self.loss = BPRLoss()\n        self.reg_loss = EmbLoss()\n\n        # parameters initialization\n        self.apply(xavier_normal_initialization)\n\n    def get_user_embedding(self, user):\n        r\"\"\" Get a batch of user embedding tensor according to input user's id.\n\n        Args:\n            user (torch.LongTensor): 
The input tensor that contains user's id, shape: [batch_size, ]\n\n        Returns:\n            torch.FloatTensor: The embedding tensor of a batch of user, shape: [batch_size, embedding_size]\n        \"\"\"\n        return self.u_embedding[user, :]\n\n    def get_item_embedding(self, item):\n        r\"\"\" Get a batch of item embedding tensor according to input item's id.\n\n        Args:\n            item (torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]\n\n        Returns:\n            torch.FloatTensor: The embedding tensor of a batch of item, shape: [batch_size, embedding_size]\n        \"\"\"\n        return self.item_embedding[item, :]\n\n    def forward(self, dropout=0.0):\n        item_embeddings = self.item_linear(self.item_raw_features)\n        item_embeddings = torch.cat((self.i_embedding, item_embeddings), -1)\n\n        user_e = F.dropout(self.u_embedding, dropout)\n        item_e = F.dropout(item_embeddings, dropout)\n        return user_e, item_e\n\n    def calculate_loss(self, interaction):\n        \"\"\"\n        loss on one batch\n        :param interaction:\n            batch data format: tensor(3, batch_size)\n            [0]: user list; [1]: positive items; [2]: negative items\n        :return:\n        \"\"\"\n        user = interaction[0]\n        pos_item = interaction[1]\n        neg_item = interaction[2]\n\n        user_embeddings, item_embeddings = self.forward()\n        user_e = user_embeddings[user, :]\n        pos_e = item_embeddings[pos_item, :]\n        #neg_e = self.get_item_embedding(neg_item)\n        neg_e = item_embeddings[neg_item, :]\n        pos_item_score, neg_item_score = torch.mul(user_e, pos_e).sum(dim=1), torch.mul(user_e, neg_e).sum(dim=1)\n        mf_loss = self.loss(pos_item_score, neg_item_score)\n        reg_loss = self.reg_loss(user_e, pos_e, neg_e)\n        loss = mf_loss + self.reg_weight * reg_loss\n        return loss\n\n    def full_sort_predict(self, interaction):\n     
   user = interaction[0]\n        user_embeddings, item_embeddings = self.forward()\n        user_e = user_embeddings[user, :]\n        all_item_e = item_embeddings\n        score = torch.matmul(user_e, all_item_e.transpose(0, 1))\n        return score\n"
  },
  {
    "path": "src/utils/configurator.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\n#\n\"\"\"\n################################\n\"\"\"\n\nimport re\nimport os\nimport yaml\nimport torch\nfrom logging import getLogger\n\n\nclass Config(object):\n    \"\"\" Configurator module that load the defined parameters.\n\n    Configurator module will first load the default parameters from the fixed properties in RecBole and then\n    load parameters from the external input.\n\n    External input supports three kind of forms: config file, command line and parameter dictionaries.\n\n    - config file: It's a file that record the parameters to be modified or added. It should be in ``yaml`` format,\n      e.g. a config file is 'example.yaml', the content is:\n\n        learning_rate: 0.001\n\n        train_batch_size: 2048\n\n    - command line: It should be in the format as '---learning_rate=0.001'\n\n    - parameter dictionaries: It should be a dict, where the key is parameter name and the value is parameter value,\n      e.g. config_dict = {'learning_rate': 0.001}\n\n    Configuration module allows the above three kind of external input format to be used together,\n    the priority order is as following:\n\n    command line > parameter dictionaries > config file\n\n    e.g. 
If we set learning_rate=0.01 in config file, learning_rate=0.02 in command line,\n    learning_rate=0.03 in parameter dictionaries.\n\n    Finally the learning_rate is equal to 0.02.\n    \"\"\"\n\n    def __init__(self, model=None, dataset=None, config_dict=None, mg=False):\n        \"\"\"\n        Args:\n            model (str/AbstractRecommender): the model name or the model class, default is None, if it is None, config\n            will search the parameter 'model' from the external input as the model name or model class.\n            dataset (str): the dataset name, default is None, if it is None, config will search the parameter 'dataset'\n            from the external input as the dataset name.\n            config_file_list (list of str): the external config file, it allows multiple config files, default is None.\n            config_dict (dict): the external parameter dictionaries, default is None.\n        \"\"\"\n        # load dataset config file yaml\n        if config_dict is None:\n            config_dict = {}\n        config_dict['model'] = model\n        config_dict['dataset'] = dataset\n        # model type\n        self.final_config_dict = self._load_dataset_model_config(config_dict, mg)\n        # config in cmd and main.py are latest\n        self.final_config_dict.update(config_dict)\n        self._set_default_parameters()\n        self._init_device()\n\n    def _load_dataset_model_config(self, config_dict, mg):\n        file_config_dict = dict()\n        file_list = []\n        # get dataset and model files\n        cur_dir = os.getcwd()\n        cur_dir = os.path.join(cur_dir, 'configs')\n        file_list.append(os.path.join(cur_dir, \"overall.yaml\"))\n        file_list.append(os.path.join(cur_dir, \"dataset\", \"{}.yaml\".format(config_dict['dataset'])))\n        file_list.append(os.path.join(cur_dir, \"model\", \"{}.yaml\".format(config_dict['model'])))\n        if mg:\n            file_list.append(os.path.join(cur_dir, \"mg.yaml\"))\n\n    
    hyper_parameters = []\n        for file in file_list:\n            if os.path.isfile(file):\n                with open(file, 'r', encoding='utf-8') as f:\n                    fdata = yaml.load(f.read(), Loader=self._build_yaml_loader())\n                    if fdata.get('hyper_parameters'):\n                        hyper_parameters.extend(fdata['hyper_parameters'])\n                    file_config_dict.update(fdata)\n                    \n        file_config_dict['hyper_parameters'] = hyper_parameters\n        return file_config_dict\n\n    def _build_yaml_loader(self):\n        loader = yaml.FullLoader\n        loader.add_implicit_resolver(\n            u'tag:yaml.org,2002:float',\n            re.compile(u'''^(?:\n             [-+]?(?:[0-9][0-9_]*)\\\\.[0-9_]*(?:[eE][-+]?[0-9]+)?\n            |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)\n            |\\\\.[0-9_]+(?:[eE][-+][0-9]+)?\n            |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\\\.[0-9_]*\n            |[-+]?\\\\.(?:inf|Inf|INF)\n            |\\\\.(?:nan|NaN|NAN))$''', re.X),\n            list(u'-+0123456789.'))\n        return loader\n\n    def _set_default_parameters(self):\n        smaller_metric = ['rmse', 'mae', 'logloss']\n        valid_metric = self.final_config_dict['valid_metric'].split('@')[0]\n        self.final_config_dict['valid_metric_bigger'] = False if valid_metric in smaller_metric else True\n        # if seed not in hyper_parameters, then add\n        if \"seed\" not in self.final_config_dict['hyper_parameters']:\n            self.final_config_dict['hyper_parameters'] += ['seed']\n\n    def _init_device(self):\n        use_gpu = self.final_config_dict['use_gpu']\n        if use_gpu:\n            os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(self.final_config_dict['gpu_id'])\n        self.final_config_dict['device'] = torch.device(\"cuda\" if torch.cuda.is_available() and use_gpu else \"cpu\")\n\n    def __setitem__(self, key, value):\n        if not isinstance(key, str):\n            raise 
TypeError(\"index must be a str.\")\n        self.final_config_dict[key] = value\n\n    def __getitem__(self, item):\n        if item in self.final_config_dict:\n            return self.final_config_dict[item]\n        else:\n            return None\n\n    def __contains__(self, key):\n        if not isinstance(key, str):\n            raise TypeError(\"index must be a str.\")\n        return key in self.final_config_dict\n\n    def __str__(self):\n        args_info = '\\n'\n        args_info += '\\n'.join([\"{}={}\".format(arg, value) for arg, value in self.final_config_dict.items()])\n        args_info += '\\n\\n'\n        return args_info\n\n    def __repr__(self):\n        return self.__str__()\n"
  },
  {
    "path": "src/utils/data_utils.py",
    "content": "import torch\nimport random\nimport torchvision.transforms as transforms\nfrom torchvision.transforms.functional import pad as img_pad\nfrom torchvision.transforms.functional import resize as img_resize\nfrom torch.nn.functional import interpolate as img_tensor_resize\nfrom torch.nn.functional import pad as img_tensor_pad\nfrom torch.nn.modules.utils import _quadruple\nimport numbers\nimport numpy as np\nfrom PIL import Image\n_pil_interpolation_to_str = {\n    Image.NEAREST: 'PIL.Image.NEAREST',\n    Image.BILINEAR: 'PIL.Image.BILINEAR',\n    Image.BICUBIC: 'PIL.Image.BICUBIC',\n    Image.LANCZOS: 'PIL.Image.LANCZOS',\n    Image.HAMMING: 'PIL.Image.HAMMING',\n    Image.BOX: 'PIL.Image.BOX',\n}\n\n\ndef flat_list_of_lists(l):\n    \"\"\"flatten a list of lists [[1,2], [3,4]] to [1,2,3,4]\"\"\"\n    return [item for sublist in l for item in sublist]\n\n\ndef mask_batch_text_tokens(\n        inputs, tokenizer, mlm_probability=0.15, is_train=True):\n    \"\"\" modified from transformers.data.data_collator\n    Args:\n        inputs: (B, L), 2D torch.Tensor, does not work for 1D. It has already been padded.\n        tokenizer:\n        mlm_probability: float\n        is_train: if True use random masking, else mask tokens at fixed position to remove randomness in evaluation.\n    \"\"\"\n    if tokenizer.mask_token is None:\n        raise ValueError(\n            \"This tokenizer does not have a mask token which is necessary for masked language modeling. 
\"\n            \"Remove the --mlm flag if you want to use this tokenizer.\"\n        )\n\n    labels = inputs.clone()\n    # We sample a few tokens in each sequence for masked-LM training\n    # (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)\n    probability_matrix = torch.full(labels.shape, mlm_probability)\n    special_tokens_mask = [\n        tokenizer.get_special_tokens_mask(\n            val, already_has_special_tokens=True) for val in labels.tolist()\n    ]\n    probability_matrix.masked_fill_(torch.tensor(\n        special_tokens_mask, dtype=torch.bool), value=0.0)\n    if tokenizer._pad_token is not None:\n        padding_mask = labels.eq(tokenizer.pad_token_id)\n        probability_matrix.masked_fill_(padding_mask, value=0.0)\n    masked_indices = torch.bernoulli(probability_matrix).bool()\n    labels[~masked_indices] = -100  # We only compute loss on masked tokens\n\n    # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])\n    indices_replaced = torch.bernoulli(\n        torch.full(labels.shape, 0.8)).bool() & masked_indices\n    inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(\n        tokenizer.mask_token)\n\n    # 10% of the time, we replace masked input tokens with random word\n    indices_random = torch.bernoulli(\n        torch.full(labels.shape, 0.5)\n        ).bool() & masked_indices & ~indices_replaced\n    random_words = torch.randint(\n        len(tokenizer), labels.shape,\n        dtype=torch.long)  # len(tokenizer) == #vocab\n    inputs[indices_random] = random_words[indices_random]\n\n    # The rest of the time (10% of the time) we keep the masked input tokens unchanged\n    return inputs, labels\n\n\ndef image_to_tensor(image: np.ndarray, keepdim: bool = True) -> torch.Tensor:\n    \"\"\"Converts a numpy image to a PyTorch 4d tensor image.\n    Args:\n        image (numpy.ndarray): image of the form :math:`(H, W, C)`, :math:`(H, W)` or\n            :math:`(B, H, W, 
C)`.\n        keepdim (bool): If ``False`` unsqueeze the input image to match the shape\n            :math:`(B, H, W, C)`. Default: ``True``\n    Returns:\n        torch.Tensor: tensor of the form :math:`(B, C, H, W)` if keepdim is ``False``,\n            :math:`(C, H, W)` otherwise.\n    \"\"\"\n    if not isinstance(image, (np.ndarray,)):\n        raise TypeError(\"Input type must be a numpy.ndarray. Got {}\".format(\n            type(image)))\n\n    if len(image.shape) > 4 or len(image.shape) < 2:\n        raise ValueError(\n            \"Input size must be a two, three or four dimensional array\")\n\n    input_shape = image.shape\n    tensor: torch.Tensor = torch.from_numpy(image)\n\n    if len(input_shape) == 2:\n        # (H, W) -> (1, H, W)\n        tensor = tensor.unsqueeze(0)\n    elif len(input_shape) == 3:\n        # (H, W, C) -> (C, H, W)\n        tensor = tensor.permute(2, 0, 1)\n    elif len(input_shape) == 4:\n        # (B, H, W, C) -> (B, C, H, W)\n        tensor = tensor.permute(0, 3, 1, 2)\n        keepdim = True  # no need to unsqueeze\n    else:\n        raise ValueError(\n            \"Cannot process image with shape {}\".format(input_shape))\n\n    return tensor.unsqueeze(0) if not keepdim else tensor\n\n\ndef get_padding(image, max_w, max_h, pad_all=False):\n    # keep the images to upper-left corner\n    if isinstance(image, torch.Tensor):\n        h, w = image.shape[-2:]\n    else:\n        w, h = image.size\n    h_padding, v_padding = max_w - w, max_h - h\n    if pad_all:\n        h_padding /= 2\n        v_padding /= 2\n        l_pad = h_padding if h_padding % 1 == 0 else h_padding+0.5\n        t_pad = v_padding if v_padding % 1 == 0 else v_padding+0.5\n        r_pad = h_padding if h_padding % 1 == 0 else h_padding-0.5\n        b_pad = v_padding if v_padding % 1 == 0 else v_padding-0.5\n    else:\n        l_pad, t_pad = 0, 0\n        r_pad, b_pad = h_padding, v_padding\n    if isinstance(image, torch.Tensor):\n        padding = 
(int(l_pad), int(r_pad), int(t_pad), int(b_pad))\n    else:\n        padding = (int(l_pad), int(t_pad), int(r_pad), int(b_pad))\n    return padding\n\n\nclass ImagePad(object):\n    def __init__(self, max_w, max_h, fill=0, padding_mode='constant'):\n        assert isinstance(fill, (numbers.Number, str, tuple))\n        assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']\n        self.max_w = max_w\n        self.max_h = max_h\n        self.fill = fill\n        self.padding_mode = padding_mode\n\n    def __call__(self, img):\n        \"\"\"\n        Args:\n            img (PIL Image): Image to be padded.\n\n        Returns:\n            PIL Image: Padded image.\n        \"\"\"\n        if isinstance(img, torch.Tensor):\n            paddings = _quadruple(get_padding(img, self.max_w, self.max_h))\n            return img_tensor_pad(\n                img, paddings,\n                self.padding_mode, self.fill)\n        return img_pad(\n            img, get_padding(img, self.max_w, self.max_h),\n            self.fill, self.padding_mode)\n\n    def __repr__(self):\n        return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\\\n            format(self.fill, self.padding_mode)\n\n\ndef get_resize_size(image, max_size):\n    \"\"\"\n    Args:\n        image: PIL Image or torch.tensor\n        max_size:\n\n    Returns:\n\n    Note the height/width order difference\n    >>> pil_img = Image.open(\"raw_img_tensor.jpg\")\n    >>> pil_img.size\n    (640, 480)  # (width, height)\n    >>> np_img = np.array(pil_img)\n    >>> np_img.shape\n    (480, 640, 3)  # (height, width, 3)\n    \"\"\"\n    # note the order of height and width for different inputs\n    if isinstance(image, torch.Tensor):\n        # width, height = image.shape[-2:]\n        height, width = image.shape[-2:]\n    else:\n        width, height = image.size\n\n    if height >= width:\n        ratio = width*1./height\n        new_height = max_size\n        new_width = new_height 
* ratio\n    else:\n        ratio = height*1./width\n        new_width = max_size\n        new_height = new_width * ratio\n    size = (int(new_height), int(new_width))\n    return size\n\n\nclass ImageResize(object):\n    \"\"\"Resize the input image (torch.tensor) to the given size.\n\n    Args:\n        max_size (int): Desired output size. If size is a sequence like\n            (h, w), output size will be matched to this. If size is an int,\n            smaller edge of the image will be matched to this number.\n            i.e, if height > width, then image will be rescaled to\n            (size * height / width, size)\n        interpolation (int, optional): Desired interpolation. Default is\n            ``PIL.Image.BILINEAR``\n    \"\"\"\n\n    def __init__(self, max_size, interpolation=Image.BILINEAR):\n        assert isinstance(max_size, int)\n        self.max_size = max_size\n        self.interpolation = interpolation\n\n    def __call__(self, img):\n        \"\"\"\n        Args:\n            img (torch.tensor): Image to be scaled.\n\n        Returns:\n            torch.tensor: Rescaled image.\n        \"\"\"\n        if isinstance(img, torch.Tensor):\n            assert isinstance(self.interpolation, str)\n            return img_tensor_resize(\n                img, size=get_resize_size(img, self.max_size),\n                mode=self.interpolation, align_corners=False)\n        return img_resize(\n            img, get_resize_size(img, self.max_size), self.interpolation)\n\n    def __repr__(self):\n        interpolate_str = _pil_interpolation_to_str[self.interpolation]\n        return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(\n            self.size, interpolate_str)\n\n\ndef get_imagenet_transform(min_size=600, max_size=1000):\n    \"\"\"parameters from https://github.com/pytorch/examples/blob/master/imagenet/main.py\n    This simply crop the center square from the image\n    \"\"\"\n    if min_size != 600:\n        import warnings\n   
     warnings.warn(f'Warning: min_size is not used in image transform, '\n                      f'setting min_size will have no effect.')\n    return transforms.Compose([\n        ImageResize(max_size, Image.BILINEAR),  # longer side will be resized to 1000\n        ImagePad(max_size, max_size),  # pad to 1000 * 1000\n    ])\n\n\nclass ImageNorm(object):\n    \"\"\"Apply Normalization to Image Pixels on GPU\n    \"\"\"\n    def __init__(self, mean, std):\n        self.mean = torch.tensor(mean).cuda().view(1, 1, 3, 1, 1)\n        self.std = torch.tensor(std).cuda().view(1, 1, 3, 1, 1)\n        # assert max(std) <= 1 and min(std) >= 0\\\n        #     or max(mean) <= 1 and min(mean) >= 0,\\\n        #         \"Please provide mean or std within range [0, 1]\"\n\n    def __call__(self, img):\n        \"\"\"\n        Args:\n            img: float image tensors, (B, N, 3, H, W)\n\n        Returns:\n            img: normalized float image tensors\n        \"\"\"\n        if torch.max(img) > 1 and self.mean.max() <= 1:\n            img.div_(255.)\n        return img.sub_(self.mean).div_(self.std)\n\n\ndef chunk_list(examples, chunk_size=2, pad_to_divisible=True):\n    \"\"\"\n    Args:\n        examples: iterable, examples grouped by image/video\n        chunk_size: int, number of examples in each chunk.\n        pad_to_divisible: bool, pad the examples to be divisible by chunk_size.\n    >>> test_examples = [3, 4, 5, 6, 7]\n    >>> chunk_list(test_examples, chunk_size=2, pad_to_divisible=True)\n    [[3, 4], [5, 6], [7, 7]]  # the lst element has some randomness\n    >>> chunk_list(test_examples, chunk_size=2, pad_to_divisible=False)\n    [[3, 4], [5, 6], [7]]\n    \"\"\"\n    n_examples = len(examples)\n    remainder = n_examples % chunk_size\n    if pad_to_divisible and remainder > 0:\n        n_pad = chunk_size - remainder\n        pad = random.choices(examples, k=n_pad)  # with replacement\n        examples = examples + pad\n        n_examples = len(examples)\n        
remainder = 0\n    chunked_examples = []\n    n_chunks = int(n_examples / chunk_size)\n    n_chunks = n_chunks + 1 if remainder > 0 else n_chunks\n    for i in range(n_chunks):\n        chunked_examples.append(examples[i*chunk_size: (i+1)*chunk_size])\n    return chunked_examples\n\n\ndef mk_input_group(key_grouped_examples, max_n_example_per_group=2, is_train=True,\n                   example_unique_key=None):\n    \"\"\" Re-organize examples into groups. Each input group will have a single image paired\n    with X (X=max_n_example_per_img) examples. Images with total #examples > X will be\n    split into multiple groups. In the case a group has < X examples, we will copy\n    the examples to make the group has X examples.\n    Args:\n        key_grouped_examples: dict, each key is image/video id,\n            each value is a list(example) associated with this image/video\n        max_n_example_per_group: int, pair max #examples with each image/video.\n           Note that each image can have multiple groups.\n        is_train: bool, if True, copy the examples to make sure each input\n            group has max_n_example_per_group examples.\n        example_unique_key: str, used to make sure no inputs are discarded by matching\n            the input and output ids specified by `example_unique_key`\n    \"\"\"\n    input_groups = []  # each element is (id, list(example))\n    for k, examples in key_grouped_examples.items():\n        chunked_examples = chunk_list(examples,\n                                      chunk_size=max_n_example_per_group,\n                                      pad_to_divisible=is_train)\n        for c in chunked_examples:\n            # if len(c) == 0:\n            #     continue\n            input_groups.append((k, c))\n\n    if example_unique_key is not None:\n        print(f\"Using example_unique_key {example_unique_key} to check whether input and output ids m\")\n        # sanity check: make sure we did not discard any input example by 
accident.\n        input_question_ids = flat_list_of_lists(\n            [[sub_e[example_unique_key] for sub_e in e] for e in key_grouped_examples.values()])\n        output_question_ids = flat_list_of_lists(\n            [[sub_e[example_unique_key] for sub_e in e[1]] for e in input_groups])\n        assert set(input_question_ids) == set(output_question_ids), \"You are missing \"\n    return input_groups\n\n\ndef repeat_tensor_rows(raw_tensor, row_repeats):\n    \"\"\" repeat raw_tensor[i] row_repeats[i] times.\n    Args:\n        raw_tensor: (B, *)\n        row_repeats: list(int), len(row_repeats) == len(raw_tensor)\n    \"\"\"\n    assert len(raw_tensor) == len(raw_tensor), \"Has to be the same length\"\n    if sum(row_repeats) == len(row_repeats):\n        return raw_tensor\n    else:\n        indices = torch.LongTensor(\n            flat_list_of_lists([[i] * r for i, r in enumerate(row_repeats)])\n        ).to(raw_tensor.device)\n        return raw_tensor.index_select(0, indices)\n\n\n\n#### Data utils\nimport io\ndef load_decompress_img_from_lmdb_value(lmdb_value):\n    \"\"\"\n    Args:\n        lmdb_value: image binary from\n            with open(filepath, \"rb\") as f:\n                lmdb_value = f.read()\n\n    Returns:\n        PIL image, (h, w, c)\n    \"\"\"\n    io_stream = io.BytesIO(lmdb_value)\n    img = Image.open(io_stream, mode=\"r\")\n    return img\n\n"
  },
  {
    "path": "src/utils/dataloader.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\n\"\"\"\nWrap dataset into dataloader\n################################################\n\"\"\"\nimport math\nimport torch\nimport random\nimport numpy as np\nfrom logging import getLogger\nfrom scipy.sparse import coo_matrix\n\n\nclass AbstractDataLoader(object):\n    \"\"\":class:`AbstractDataLoader` is an abstract object which would return a batch of data which is loaded by\n    :class:`~recbole.data.interaction.Interaction` when it is iterated.\n    And it is also the ancestor of all other dataloader.\n\n    Args:\n        config (Config): The config of dataloader.\n        dataset (Dataset): The dataset of dataloader.\n        batch_size (int, optional): The batch_size of dataloader. Defaults to ``1``.\n        dl_format (InputType, optional): The input type of dataloader. Defaults to\n            :obj:`~recbole.utils.enum_type.InputType.POINTWISE`.\n        shuffle (bool, optional): Whether the dataloader will be shuffle after a round. 
Defaults to ``False``.\n\n    Attributes:\n        dataset (Dataset): The dataset of this dataloader.\n        shuffle (bool): If ``True``, dataloader will shuffle before every epoch.\n        real_time (bool): If ``True``, dataloader will do data pre-processing,\n            such as neg-sampling and data-augmentation.\n        pr (int): Pointer of dataloader.\n        step (int): The increment of :attr:`pr` for each batch.\n        batch_size (int): The max interaction number for all batch.\n    \"\"\"\n    def __init__(self, config, dataset, additional_dataset=None,\n                 batch_size=1, neg_sampling=False, shuffle=False):\n        self.config = config\n        self.logger = getLogger()\n        self.dataset = dataset\n        self.dataset_bk = self.dataset.copy(self.dataset.df)\n        # if config['model_type'] == ModelType.GENERAL:\n        #     self.dataset.df.drop(self.dataset.ts_id, inplace=True, axis=1)\n        # elif config['model_type'] == ModelType.SEQUENTIAL:\n        #     # sort instances\n        #     pass\n        self.additional_dataset = additional_dataset\n        self.batch_size = batch_size\n        self.step = batch_size\n        self.shuffle = shuffle\n        self.neg_sampling = neg_sampling\n        self.device = config['device']\n\n        self.sparsity = 1 - self.dataset.inter_num / self.dataset.user_num / self.dataset.item_num\n        self.pr = 0\n        self.inter_pr = 0\n\n    def pretrain_setup(self):\n        \"\"\"This function can be used to deal with some problems after essential args are initialized,\n        such as the batch-size-adaptation when neg-sampling is needed, and so on. 
By default, it will do nothing.\n        \"\"\"\n        pass\n\n    def data_preprocess(self):\n        \"\"\"This function is used to do some data preprocess, such as pre-neg-sampling and pre-data-augmentation.\n        By default, it will do nothing.\n        \"\"\"\n        pass\n\n    def __len__(self):\n        return math.ceil(self.pr_end / self.step)\n\n    def __iter__(self):\n        if self.shuffle:\n            self._shuffle()\n        return self\n\n    def __next__(self):\n        if self.pr >= self.pr_end:\n            self.pr = 0\n            self.inter_pr = 0\n            raise StopIteration()\n        return self._next_batch_data()\n\n    @property\n    def pr_end(self):\n        \"\"\"This property marks the end of dataloader.pr which is used in :meth:`__next__()`.\"\"\"\n        raise NotImplementedError('Method [pr_end] should be implemented')\n\n    def _shuffle(self):\n        \"\"\"Shuffle the order of data, and it will be called by :meth:`__iter__()` if self.shuffle is True.\n        \"\"\"\n        raise NotImplementedError('Method [shuffle] should be implemented.')\n\n    def _next_batch_data(self):\n        \"\"\"Assemble next batch of data in form of Interaction, and return these data.\n\n        Returns:\n            Interaction: The next batch of data.\n        \"\"\"\n        raise NotImplementedError('Method [next_batch_data] should be implemented.')\n\n\nclass TrainDataLoader(AbstractDataLoader):\n    \"\"\"\n    General dataloader with negative sampling.\n    \"\"\"\n    def __init__(self, config, dataset, batch_size=1, shuffle=False):\n        super().__init__(config, dataset, additional_dataset=None,\n                         batch_size=batch_size, neg_sampling=True, shuffle=shuffle)\n\n        # special for training dataloader\n        self.history_items_per_u = dict()\n        # full items in training.\n        self.all_items = self.dataset.df[self.dataset.iid_field].unique().tolist()\n        self.all_uids = 
self.dataset.df[self.dataset.uid_field].unique()\n        self.all_items_set = set(self.all_items)\n        self.all_users_set = set(self.all_uids)\n        self.all_item_len = len(self.all_items)\n        # if full sampling\n        self.use_full_sampling = config['use_full_sampling']\n\n        if config['use_neg_sampling']:\n            if self.use_full_sampling:\n                self.sample_func = self._get_full_uids_sample\n            else:\n                self.sample_func = self._get_neg_sample\n        else:\n            self.sample_func = self._get_non_neg_sample\n\n        self._get_history_items_u()\n        self.neighborhood_loss_required = config['use_neighborhood_loss']\n        if self.neighborhood_loss_required:\n            self.history_users_per_i = {}\n            self._get_history_users_i()\n            self.user_user_dict = self._get_my_neighbors(self.config['USER_ID_FIELD'])\n            self.item_item_dict = self._get_my_neighbors(self.config['ITEM_ID_FIELD'])\n\n    def pretrain_setup(self):\n        \"\"\"\n        Reset dataloader. Outputing the same positive & negative samples with each training.\n        :return:\n        \"\"\"\n        # sort & random\n        if self.shuffle:\n            self.dataset = self.dataset_bk.copy(self.dataset_bk.df)\n        self.all_items.sort()\n        if self.use_full_sampling:\n            self.all_uids.sort()\n        random.shuffle(self.all_items)\n        # reorder dataset as default (chronological order)\n        #self.dataset.sort_by_chronological()\n\n    def inter_matrix(self, form='coo', value_field=None):\n        \"\"\"Get sparse matrix that describe interactions between user_id and item_id.\n\n        Sparse matrix has shape (user_num, item_num).\n\n        For a row of <src, tgt>, ``matrix[src, tgt] = 1`` if ``value_field`` is ``None``,\n        else ``matrix[src, tgt] = self.inter_feat[src, tgt]``.\n\n        Args:\n            form (str, optional): Sparse matrix format. 
Defaults to ``coo``.\n            value_field (str, optional): Data of sparse matrix, which should exist in ``df_feat``.\n                Defaults to ``None``.\n\n        Returns:\n            scipy.sparse: Sparse matrix in form ``coo`` or ``csr``.\n        \"\"\"\n        if not self.dataset.uid_field or not self.dataset.iid_field:\n            raise ValueError('dataset doesn\\'t exist uid/iid, thus can not converted to sparse matrix')\n        return self._create_sparse_matrix(self.dataset.df, self.dataset.uid_field,\n                                          self.dataset.iid_field, form, value_field)\n\n    def _create_sparse_matrix(self, df_feat, source_field, target_field, form='coo', value_field=None):\n        \"\"\"Get sparse matrix that describe relations between two fields.\n\n        Source and target should be token-like fields.\n\n        Sparse matrix has shape (``self.num(source_field)``, ``self.num(target_field)``).\n\n        For a row of <src, tgt>, ``matrix[src, tgt] = 1`` if ``value_field`` is ``None``,\n        else ``matrix[src, tgt] = df_feat[value_field][src, tgt]``.\n\n        Args:\n            df_feat (pandas.DataFrame): Feature where src and tgt exist.\n            form (str, optional): Sparse matrix format. 
Defaults to ``coo``.\n            value_field (str, optional): Data of sparse matrix, which should exist in ``df_feat``.\n                Defaults to ``None``.\n\n        Returns:\n            scipy.sparse: Sparse matrix in form ``coo`` or ``csr``.\n        \"\"\"\n        src = df_feat[source_field].values\n        tgt = df_feat[target_field].values\n        if value_field is None:\n            data = np.ones(len(df_feat))\n        else:\n            if value_field not in df_feat.columns:\n                raise ValueError('value_field [{}] should be one of `df_feat`\\'s features.'.format(value_field))\n            data = df_feat[value_field].values\n        mat = coo_matrix((data, (src, tgt)), shape=(self.dataset.user_num, self.dataset.item_num))\n\n        if form == 'coo':\n            return mat\n        elif form == 'csr':\n            return mat.tocsr()\n        else:\n            raise NotImplementedError('sparse matrix format [{}] has not been implemented.'.format(form))\n\n    @property\n    def pr_end(self):\n        if self.use_full_sampling:\n            return len(self.all_uids)\n        return len(self.dataset)\n\n    def _shuffle(self):\n        self.dataset.shuffle()\n        if self.use_full_sampling:\n            np.random.shuffle(self.all_uids)\n\n    def _next_batch_data(self):\n        return self.sample_func()\n\n    def _get_neg_sample(self):\n        cur_data = self.dataset[self.pr: self.pr + self.step]\n        self.pr += self.step\n        # to tensor\n        user_tensor = torch.tensor(cur_data[self.config['USER_ID_FIELD']].values).type(torch.LongTensor).to(self.device)\n        item_tensor = torch.tensor(cur_data[self.config['ITEM_ID_FIELD']].values).type(torch.LongTensor).to(self.device)\n        batch_tensor = torch.cat((torch.unsqueeze(user_tensor, 0),\n                                  torch.unsqueeze(item_tensor, 0)))\n        u_ids = cur_data[self.config['USER_ID_FIELD']]\n        # sampling negative items only in the dataset 
(train)\n        neg_ids = self._sample_neg_ids(u_ids).to(self.device)\n        # for neighborhood loss\n        if self.neighborhood_loss_required:\n            i_ids = cur_data[self.config['ITEM_ID_FIELD']]\n            pos_neighbors, neg_neighbors = self._get_neighborhood_samples(i_ids, self.config['ITEM_ID_FIELD'])\n            pos_neighbors, neg_neighbors = pos_neighbors.to(self.device), neg_neighbors.to(self.device)\n\n            batch_tensor = torch.cat((batch_tensor, neg_ids.unsqueeze(0),\n                                      pos_neighbors.unsqueeze(0), neg_neighbors.unsqueeze(0)))\n\n        # merge negative samples\n        else:\n            batch_tensor = torch.cat((batch_tensor, neg_ids.unsqueeze(0)))\n\n        return batch_tensor\n\n    def _get_non_neg_sample(self):\n        cur_data = self.dataset[self.pr: self.pr + self.step]\n        self.pr += self.step\n        # to tensor\n        user_tensor = torch.tensor(cur_data[self.config['USER_ID_FIELD']].values).type(torch.LongTensor).to(self.device)\n        item_tensor = torch.tensor(cur_data[self.config['ITEM_ID_FIELD']].values).type(torch.LongTensor).to(self.device)\n        batch_tensor = torch.cat((torch.unsqueeze(user_tensor, 0),\n                                  torch.unsqueeze(item_tensor, 0)))\n        return batch_tensor\n\n    def _get_full_uids_sample(self):\n        user_tensor = torch.tensor(self.all_uids[self.pr: self.pr + self.step]).type(torch.LongTensor).to(self.device)\n        self.pr += self.step\n        return user_tensor\n\n    def _sample_neg_ids(self, u_ids):\n        neg_ids = []\n        for u in u_ids:\n            # random 1 item\n            iid = self._random()\n            while iid in self.history_items_per_u[u]:\n                iid = self._random()\n            neg_ids.append(iid)\n        return torch.tensor(neg_ids).type(torch.LongTensor)\n\n    def _get_my_neighbors(self, id_str):\n        ret_dict = {}\n        a2b_dict = self.history_items_per_u if id_str == 
self.config['USER_ID_FIELD'] else self.history_users_per_i\n        b2a_dict = self.history_users_per_i if id_str == self.config['USER_ID_FIELD'] else self.history_items_per_u\n        for i, j in a2b_dict.items():\n            k = set()\n            for m in j:\n                k |= b2a_dict.get(m, set()).copy()\n            k.discard(i)                        # remove myself\n            ret_dict[i] = k\n        return ret_dict\n\n    def _get_neighborhood_samples(self, ids, id_str):\n        a2a_dict = self.user_user_dict if id_str == self.config['USER_ID_FIELD'] else self.item_item_dict\n        all_set = self.all_users_set if id_str == self.config['USER_ID_FIELD'] else self.all_items_set\n        pos_ids, neg_ids = [], []\n        for i in ids:\n            pos_ids_my = a2a_dict[i]\n            if len(pos_ids_my) <= 0 or len(pos_ids_my)/len(all_set) > 0.8:\n                pos_ids.append(0)\n                neg_ids.append(0)\n                continue\n            pos_id = random.sample(pos_ids_my, 1)[0]\n            pos_ids.append(pos_id)\n            neg_id = random.sample(all_set, 1)[0]\n            while neg_id in pos_ids_my:\n                neg_id = random.sample(all_set, 1)[0]\n            neg_ids.append(neg_id)\n        return torch.tensor(pos_ids).type(torch.LongTensor), torch.tensor(neg_ids).type(torch.LongTensor)\n\n    def _random(self):\n        rd_id = random.sample(self.all_items, 1)[0]\n        return rd_id\n\n    def _get_history_items_u(self):\n        uid_field = self.dataset.uid_field\n        iid_field = self.dataset.iid_field\n        # load avail items for all uid\n        uid_freq = self.dataset.df.groupby(uid_field)[iid_field]\n        for u, u_ls in uid_freq:\n            self.history_items_per_u[u] = set(u_ls.values)\n        return self.history_items_per_u\n\n    def _get_history_users_i(self):\n        uid_field = self.dataset.uid_field\n        iid_field = self.dataset.iid_field\n        # load avail items for all uid\n        
iid_freq = self.dataset.df.groupby(iid_field)[uid_field]\n        for i, u_ls in iid_freq:\n            self.history_users_per_i[i] = set(u_ls.values)\n        return self.history_users_per_i\n\n\nclass EvalDataLoader(AbstractDataLoader):\n    \"\"\"\n        additional_dataset: training dataset in evaluation\n    \"\"\"\n    def __init__(self, config, dataset, additional_dataset=None,\n                 batch_size=1, shuffle=False):\n        super().__init__(config, dataset, additional_dataset=additional_dataset,\n                         batch_size=batch_size, neg_sampling=False, shuffle=shuffle)\n\n        if additional_dataset is None:\n            raise ValueError('Training datasets is nan')\n        self.eval_items_per_u = []\n        self.eval_len_list = []\n        self.train_pos_len_list = []\n\n        self.eval_u = self.dataset.df[self.dataset.uid_field].unique()\n        # special for eval dataloader\n        self.pos_items_per_u = self._get_pos_items_per_u(self.eval_u).to(self.device)\n        self._get_eval_items_per_u(self.eval_u)\n        # to device\n        self.eval_u = torch.tensor(self.eval_u).type(torch.LongTensor).to(self.device)\n\n    @property\n    def pr_end(self):\n        return self.eval_u.shape[0]\n\n    def _shuffle(self):\n        self.dataset.shuffle()\n\n    def _next_batch_data(self):\n        inter_cnt = sum(self.train_pos_len_list[self.pr: self.pr+self.step])\n        batch_users = self.eval_u[self.pr: self.pr + self.step]\n        batch_mask_matrix = self.pos_items_per_u[:, self.inter_pr: self.inter_pr+inter_cnt].clone()\n        # user_ids to index\n        batch_mask_matrix[0] -= self.pr\n        self.inter_pr += inter_cnt\n        self.pr += self.step\n\n        return [batch_users, batch_mask_matrix]\n\n    def _get_pos_items_per_u(self, eval_users):\n        \"\"\"\n        history items in training dataset.\n        masking out positive items in evaluation\n        :return:\n        user_id - item_ids matrix\n        [[0, 
0, ... , 1, ...],\n         [0, 1, ... , 0, ...]]\n        \"\"\"\n        uid_field = self.additional_dataset.uid_field\n        iid_field = self.additional_dataset.iid_field\n        # load avail items for all uid\n        uid_freq = self.additional_dataset.df.groupby(uid_field)[iid_field]\n        u_ids = []\n        i_ids = []\n        for i, u in enumerate(eval_users):\n            u_ls = uid_freq.get_group(u).values\n            i_len = len(u_ls)\n            self.train_pos_len_list.append(i_len)\n            u_ids.extend([i]*i_len)\n            i_ids.extend(u_ls)\n        return torch.tensor([u_ids, i_ids]).type(torch.LongTensor)\n\n    def _get_eval_items_per_u(self, eval_users):\n        \"\"\"\n        get evaluated items for each u\n        :return:\n        \"\"\"\n        uid_field = self.dataset.uid_field\n        iid_field = self.dataset.iid_field\n        # load avail items for all uid\n        uid_freq = self.dataset.df.groupby(uid_field)[iid_field]\n        for u in eval_users:\n            u_ls = uid_freq.get_group(u).values\n            self.eval_len_list.append(len(u_ls))\n            self.eval_items_per_u.append(u_ls)\n        self.eval_len_list = np.asarray(self.eval_len_list)\n\n    # return pos_items for each u\n    def get_eval_items(self):\n        return self.eval_items_per_u\n\n    def get_eval_len_list(self):\n        return self.eval_len_list\n\n    def get_eval_users(self):\n        return self.eval_u.cpu()\n\n\n"
  },
  {
    "path": "src/utils/dataset.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\n#\n# updated: Mar. 25, 2022\n# Filled non-existing raw features with non-zero after encoded from encoders\n\n\"\"\"\nData pre-processing\n##########################\n\"\"\"\nfrom logging import getLogger\nfrom collections import Counter\nimport os\nimport pandas as pd\nimport numpy as np\nimport torch\nfrom utils.data_utils import (ImageResize, ImagePad, image_to_tensor, load_decompress_img_from_lmdb_value)\nimport lmdb\n\n\nclass RecDataset(object):\n    def __init__(self, config, df=None):\n        self.config = config\n        self.logger = getLogger()\n\n        # data path & files\n        self.dataset_name = config['dataset']\n        self.dataset_path = os.path.abspath(config['data_path']+self.dataset_name)\n\n        # dataframe\n        self.uid_field = self.config['USER_ID_FIELD']\n        self.iid_field = self.config['ITEM_ID_FIELD']\n        self.splitting_label = self.config['inter_splitting_label']\n\n        if df is not None:\n            self.df = df\n            return\n        # if all files exists\n        check_file_list = [self.config['inter_file_name']]\n        for i in check_file_list:\n            file_path = os.path.join(self.dataset_path, i)\n            if not os.path.isfile(file_path):\n                raise ValueError('File {} not exist'.format(file_path))\n\n        # load rating file from data path?\n        self.load_inter_graph(config['inter_file_name'])\n        self.item_num = int(max(self.df[self.iid_field].values)) + 1\n        self.user_num = int(max(self.df[self.uid_field].values)) + 1\n\n    def load_inter_graph(self, file_name):\n        inter_file = os.path.join(self.dataset_path, file_name)\n        cols = [self.uid_field, self.iid_field, self.splitting_label]\n        self.df = pd.read_csv(inter_file, usecols=cols, sep=self.config['field_separator'])\n        if not self.df.columns.isin(cols).all():\n            raise ValueError('File {} lost some 
required columns.'.format(inter_file))\n\n    def split(self):\n        dfs = []\n        # splitting into training/validation/test\n        for i in range(3):\n            temp_df = self.df[self.df[self.splitting_label] == i].copy()\n            temp_df.drop(self.splitting_label, inplace=True, axis=1)        # no use again\n            dfs.append(temp_df)\n        if self.config['filter_out_cod_start_users']:\n            # filtering out new users in val/test sets\n            train_u = set(dfs[0][self.uid_field].values)\n            for i in [1, 2]:\n                dropped_inter = pd.Series(True, index=dfs[i].index)\n                dropped_inter ^= dfs[i][self.uid_field].isin(train_u)\n                dfs[i].drop(dfs[i].index[dropped_inter], inplace=True)\n\n        # wrap as RecDataset\n        full_ds = [self.copy(_) for _ in dfs]\n        return full_ds\n\n    def copy(self, new_df):\n        \"\"\"Given a new interaction feature, return a new :class:`Dataset` object,\n                whose interaction feature is updated with ``new_df``, and all the other attributes the same.\n\n                Args:\n                    new_df (pandas.DataFrame): The new interaction feature need to be updated.\n\n                Returns:\n                    :class:`~Dataset`: the new :class:`~Dataset` object, whose interaction feature has been updated.\n                \"\"\"\n        nxt = RecDataset(self.config, new_df)\n\n        nxt.item_num = self.item_num\n        nxt.user_num = self.user_num\n        return nxt\n\n    def get_user_num(self):\n        return self.user_num\n\n    def get_item_num(self):\n        return self.item_num\n\n    def shuffle(self):\n        \"\"\"Shuffle the interaction records inplace.\n        \"\"\"\n        self.df = self.df.sample(frac=1, replace=False).reset_index(drop=True)\n\n    def __len__(self):\n        return len(self.df)\n\n    def __getitem__(self, idx):\n        # Series result\n        return self.df.iloc[idx]\n\n    def 
__repr__(self):\n        return self.__str__()\n\n    def __str__(self):\n        info = [self.dataset_name]\n        self.inter_num = len(self.df)\n        uni_u = pd.unique(self.df[self.uid_field])\n        uni_i = pd.unique(self.df[self.iid_field])\n        tmp_user_num, tmp_item_num = 0, 0\n        if self.uid_field:\n            tmp_user_num = len(uni_u)\n            avg_actions_of_users = self.inter_num/tmp_user_num\n            info.extend(['The number of users: {}'.format(tmp_user_num),\n                         'Average actions of users: {}'.format(avg_actions_of_users)])\n        if self.iid_field:\n            tmp_item_num = len(uni_i)\n            avg_actions_of_items = self.inter_num/tmp_item_num\n            info.extend(['The number of items: {}'.format(tmp_item_num),\n                         'Average actions of items: {}'.format(avg_actions_of_items)])\n        info.append('The number of inters: {}'.format(self.inter_num))\n        if self.uid_field and self.iid_field:\n            sparsity = 1 - self.inter_num / tmp_user_num / tmp_item_num\n            info.append('The sparsity of the dataset: {}%'.format(sparsity * 100))\n        return '\\n'.join(info)\n"
  },
  {
    "path": "src/utils/logger.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\n\n\"\"\"\n###############################\n\"\"\"\n\nimport logging\nimport os\nfrom utils.utils import get_local_time\n\n\ndef init_logger(config):\n    \"\"\"\n    A logger that can show a message on standard output and write it into the\n    file named `filename` simultaneously.\n    All the message that you want to log MUST be str.\n\n    Args:\n        config (Config): An instance object of Config, used to record parameter information.\n    \"\"\"\n    LOGROOT = './log/'\n    dir_name = os.path.dirname(LOGROOT)\n    if not os.path.exists(dir_name):\n        os.makedirs(dir_name)\n\n    logfilename = '{}-{}-{}.log'.format(config['model'], config['dataset'], get_local_time())\n\n    logfilepath = os.path.join(LOGROOT, logfilename)\n\n    filefmt = \"%(asctime)-15s %(levelname)s %(message)s\"\n    filedatefmt = \"%a %d %b %Y %H:%M:%S\"\n    fileformatter = logging.Formatter(filefmt, filedatefmt)\n\n    sfmt = u\"%(asctime)-15s %(levelname)s %(message)s\"\n    sdatefmt = \"%d %b %H:%M\"\n    sformatter = logging.Formatter(sfmt, sdatefmt)\n    if config['state'] is None or config['state'].lower() == 'info':\n        level = logging.INFO\n    elif config['state'].lower() == 'debug':\n        level = logging.DEBUG\n    elif config['state'].lower() == 'error':\n        level = logging.ERROR\n    elif config['state'].lower() == 'warning':\n        level = logging.WARNING\n    elif config['state'].lower() == 'critical':\n        level = logging.CRITICAL\n    else:\n        level = logging.INFO\n    # comment following 3 lines and handlers = [sh, fh] to cancel file dump.\n    fh = logging.FileHandler(logfilepath, 'w', 'utf-8')\n    fh.setLevel(level)\n    fh.setFormatter(fileformatter)\n\n    sh = logging.StreamHandler()\n    sh.setLevel(level)\n    sh.setFormatter(sformatter)\n\n    logging.basicConfig(\n        level=level,\n        #handlers=[sh]\n        handlers = [sh, fh]\n    )\n\n\n"
  },
  {
    "path": "src/utils/metrics.py",
    "content": "# encoding: utf-8\n# @email: enoche.chow@gmail.com\n\"\"\"\n############################\n\"\"\"\n\nfrom logging import getLogger\n\nimport numpy as np\n\n\ndef recall_(pos_index, pos_len):\n    # Recall: average single users recall ratio.\n    rec_ret = np.cumsum(pos_index, axis=1) / pos_len.reshape(-1, 1)\n    return rec_ret.mean(axis=0)\n\n\ndef recall2_(pos_index, pos_len):\n    r\"\"\"\n    All hits are summed up and then averaged for recall.\n    :param pos_index:\n    :param pos_len:\n    :return:\n    \"\"\"\n    rec_cum = np.cumsum(pos_index, axis=1)\n    rec_ret = rec_cum.sum(axis=0) / pos_len.sum()\n    return rec_ret\n\n\ndef ndcg_(pos_index, pos_len):\n    r\"\"\"NDCG_ (also known as normalized discounted cumulative gain) is a measure of ranking quality.\n    Through normalizing the score, users and their recommendation list results in the whole test set can be evaluated.\n    .. _NDCG: https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG\n\n    .. 
math::\n        \\begin{gather}\n            \\mathrm {DCG@K}=\\sum_{i=1}^{K} \\frac{2^{rel_i}-1}{\\log_{2}{(i+1)}}\\\\\n            \\mathrm {IDCG@K}=\\sum_{i=1}^{K}\\frac{1}{\\log_{2}{(i+1)}}\\\\\n            \\mathrm {NDCG_u@K}=\\frac{DCG_u@K}{IDCG_u@K}\\\\\n            \\mathrm {NDCG@K}=\\frac{\\sum \\nolimits_{u \\in u^{te}NDCG_u@K}}{|u^{te}|}\n        \\end{gather}\n\n    :math:`K` stands for recommending :math:`K` items.\n    And the :math:`rel_i` is the relevance of the item in position :math:`i` in the recommendation list.\n    :math:`2^{rel_i}` equals to 1 if the item hits otherwise 0.\n    :math:`U^{te}` is for all users in the test set.\n    \"\"\"\n    len_rank = np.full_like(pos_len, pos_index.shape[1])\n    idcg_len = np.where(pos_len > len_rank, len_rank, pos_len)\n\n    iranks = np.zeros_like(pos_index, dtype=np.float)\n    iranks[:, :] = np.arange(1, pos_index.shape[1] + 1)\n    idcg = np.cumsum(1.0 / np.log2(iranks + 1), axis=1)\n    for row, idx in enumerate(idcg_len):\n        idcg[row, idx:] = idcg[row, idx - 1]\n\n    ranks = np.zeros_like(pos_index, dtype=np.float)\n    ranks[:, :] = np.arange(1, pos_index.shape[1] + 1)\n    dcg = 1.0 / np.log2(ranks + 1)\n    dcg = np.cumsum(np.where(pos_index, dcg, 0), axis=1)\n\n    result = dcg / idcg\n    return result.mean(axis=0)\n\n\ndef map_(pos_index, pos_len):\n    r\"\"\"MAP_ (also known as Mean Average Precision) The MAP is meant to calculate Avg. Precision for the relevant items.\n    Note:\n        In this case the normalization factor used is :math:`\\frac{1}{\\min (m,N)}`, which prevents your AP score from\n        being unfairly suppressed when your number of recommendations couldn't possibly capture all the correct ones.\n\n    .. _map: http://sdsawtelle.github.io/blog/output/mean-average-precision-MAP-for-recommender-systems.html#MAP-for-Recommender-Algorithms\n\n    .. 
math::\n        \\begin{align*}\n        \\mathrm{AP@N} &= \\frac{1}{\\mathrm{min}(m,N)}\\sum_{k=1}^N P(k) \\cdot rel(k) \\\\\n        \\mathrm{MAP@N}& = \\frac{1}{|U|}\\sum_{u=1}^{|U|}(\\mathrm{AP@N})_u\n        \\end{align*}\n    \"\"\"\n    pre = pos_index.cumsum(axis=1) / np.arange(1, pos_index.shape[1] + 1)\n    sum_pre = np.cumsum(pre * pos_index.astype(np.float), axis=1)\n    len_rank = np.full_like(pos_len, pos_index.shape[1])\n    actual_len = np.where(pos_len > len_rank, len_rank, pos_len)\n    result = np.zeros_like(pos_index, dtype=np.float)\n    for row, lens in enumerate(actual_len):\n        ranges = np.arange(1, pos_index.shape[1]+1)\n        ranges[lens:] = ranges[lens - 1]\n        result[row] = sum_pre[row] / ranges\n    return result.mean(axis=0)\n\n\ndef precision_(pos_index, pos_len):\n    r\"\"\"Precision_ (also called positive predictive value) is the fraction of\n    relevant instances among the retrieved instances\n    .. _precision: https://en.wikipedia.org/wiki/Precision_and_recall#Precision\n\n    .. math::\n        \\mathrm {Precision@K} = \\frac{|Rel_u \\cap Rec_u|}{Rec_u}\n\n    :math:`Rel_u` is the set of items relavent to user :math:`U`,\n    :math:`Rec_u` is the top K items recommended to users.\n    We obtain the result by calculating the average :math:`Precision@K` of each user.\n    \"\"\"\n    rec_ret = pos_index.cumsum(axis=1) / np.arange(1, pos_index.shape[1] + 1)\n    return rec_ret.mean(axis=0)\n\n\n\"\"\"Function name and function mapper.\nUseful when we have to serialize evaluation metric names\nand call the functions based on deserialized names\n\"\"\"\nmetrics_dict = {\n    'ndcg': ndcg_,\n    'recall': recall_,\n    'recall2': recall2_,\n    'precision': precision_,\n    'map': map_,\n}\n"
  },
  {
    "path": "src/utils/misc.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\n\"\"\"\nmodified from UNITER\n\"\"\"\nimport json\nimport random\nimport sys\n\nimport torch\nimport numpy as np\n\n\nclass NoOp(object):\n    \"\"\" useful for distributed training No-Ops \"\"\"\n    def __getattr__(self, name):\n        return self.noop\n\n    def noop(self, *args, **kwargs):\n        return\n\n\ndef set_random_seed(seed):\n    random.seed(seed)\n    np.random.seed(seed)\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed_all(seed)\n\n\ndef zero_none_grad(model):\n    for p in model.parameters():\n        if p.grad is None and p.requires_grad:\n            p.grad = p.data.new(p.size()).zero_()\n"
  },
  {
    "path": "src/utils/quick_start.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\n\n\"\"\"\nRun application\n##########################\n\"\"\"\nfrom logging import getLogger\nfrom itertools import product\nfrom utils.dataset import RecDataset\nfrom utils.dataloader import TrainDataLoader, EvalDataLoader\nfrom utils.logger import init_logger\nfrom utils.configurator import Config\nfrom utils.utils import init_seed, get_model, get_trainer, dict2str\nimport platform\nimport os\n\n\ndef quick_start(model, dataset, config_dict, save_model=True, mg=False):\n    # merge config dict\n    config = Config(model, dataset, config_dict, mg)\n    init_logger(config)\n    logger = getLogger()\n    # print config infor\n    logger.info('██Server: \\t' + platform.node())\n    logger.info('██Dir: \\t' + os.getcwd() + '\\n')\n    logger.info(config)\n\n    # load data\n    dataset = RecDataset(config)\n    # print dataset statistics\n    logger.info(str(dataset))\n\n    train_dataset, valid_dataset, test_dataset = dataset.split()\n    logger.info('\\n====Training====\\n' + str(train_dataset))\n    logger.info('\\n====Validation====\\n' + str(valid_dataset))\n    logger.info('\\n====Testing====\\n' + str(test_dataset))\n\n    # wrap into dataloader\n    train_data = TrainDataLoader(config, train_dataset, batch_size=config['train_batch_size'], shuffle=True)\n    (valid_data, test_data) = (\n        EvalDataLoader(config, valid_dataset, additional_dataset=train_dataset, batch_size=config['eval_batch_size']),\n        EvalDataLoader(config, test_dataset, additional_dataset=train_dataset, batch_size=config['eval_batch_size']))\n\n    ############ Dataset loadded, run model\n    hyper_ret = []\n    val_metric = config['valid_metric'].lower()\n    best_test_value = 0.0\n    idx = best_test_idx = 0\n\n    logger.info('\\n\\n=================================\\n\\n')\n\n    # hyper-parameters\n    hyper_ls = []\n    if \"seed\" not in config['hyper_parameters']:\n        config['hyper_parameters'] = ['seed'] 
+ config['hyper_parameters']\n    for i in config['hyper_parameters']:\n        hyper_ls.append(config[i] or [None])\n    # combinations\n    combinators = list(product(*hyper_ls))\n    total_loops = len(combinators)\n    for hyper_tuple in combinators:\n        # random seed reset\n        for j, k in zip(config['hyper_parameters'], hyper_tuple):\n            config[j] = k\n        init_seed(config['seed'])\n\n        logger.info('========={}/{}: Parameters:{}={}======='.format(\n            idx+1, total_loops, config['hyper_parameters'], hyper_tuple))\n\n        # set random state of dataloader\n        train_data.pretrain_setup()\n        # model loading and initialization\n        model = get_model(config['model'])(config, train_data).to(config['device'])\n        logger.info(model)\n\n        # trainer loading and initialization\n        trainer = get_trainer()(config, model, mg)\n        # debug\n        # model training\n        best_valid_score, best_valid_result, best_test_upon_valid = trainer.fit(train_data, valid_data=valid_data, test_data=test_data, saved=save_model)\n        #########\n        hyper_ret.append((hyper_tuple, best_valid_result, best_test_upon_valid))\n\n        # save best test\n        if best_test_upon_valid[val_metric] > best_test_value:\n            best_test_value = best_test_upon_valid[val_metric]\n            best_test_idx = idx\n        idx += 1\n\n        logger.info('best valid result: {}'.format(dict2str(best_valid_result)))\n        logger.info('test result: {}'.format(dict2str(best_test_upon_valid)))\n        logger.info('████Current BEST████:\\nParameters: {}={},\\n'\n                    'Valid: {},\\nTest: {}\\n\\n\\n'.format(config['hyper_parameters'],\n            hyper_ret[best_test_idx][0], dict2str(hyper_ret[best_test_idx][1]), dict2str(hyper_ret[best_test_idx][2])))\n\n    # log info\n    logger.info('\\n============All Over=====================')\n    for (p, k, v) in hyper_ret:\n        logger.info('Parameters: 
{}={},\\n best valid: {},\\n best test: {}'.format(config['hyper_parameters'],\n                                                                                  p, dict2str(k), dict2str(v)))\n\n    logger.info('\\n\\n█████████████ BEST ████████████████')\n    logger.info('\\tParameters: {}={},\\nValid: {},\\nTest: {}\\n\\n'.format(config['hyper_parameters'],\n                                                                   hyper_ret[best_test_idx][0],\n                                                                   dict2str(hyper_ret[best_test_idx][1]),\n                                                                   dict2str(hyper_ret[best_test_idx][2])))\n\n"
  },
  {
    "path": "src/utils/topk_evaluator.py",
    "content": "# coding: utf-8\n# @email: enoche.chow@gmail.com\n\"\"\"\n################################\n\"\"\"\nimport os\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom utils.metrics import metrics_dict\nfrom torch.nn.utils.rnn import pad_sequence\nfrom utils.utils import get_local_time\n\n\n# These metrics are typical in topk recommendations\ntopk_metrics = {metric.lower(): metric for metric in ['Recall', 'Recall2', 'Precision', 'NDCG', 'MAP']}\n\n\nclass TopKEvaluator(object):\n    r\"\"\"TopK Evaluator is mainly used in ranking tasks. Now, we support six topk metrics which\n    contain `'Hit', 'Recall', 'MRR', 'Precision', 'NDCG', 'MAP'`.\n\n    Note:\n        The metrics used calculate group-based metrics which considers the metrics scores averaged\n        across users. Some of them are also limited to k.\n\n    \"\"\"\n\n    def __init__(self, config):\n        self.config = config\n        self.metrics = config['metrics']\n        self.topk = config['topk']\n        self.save_recom_result = config['save_recommended_topk']\n        self._check_args()\n\n    def collect(self, interaction, scores_tensor, full=False):\n        \"\"\"collect the topk intermediate result of one batch, this function mainly\n        implements padding and TopK finding. It is called at the end of each batch\n\n        Args:\n            interaction (Interaction): :class:`AbstractEvaluator` of the batch\n            scores_tensor (tensor): the tensor of model output with size of `(N, )`\n            full (bool, optional): whether it is full sort. 
Default: False.\n\n        \"\"\"\n        user_len_list = interaction.user_len_list\n        if full is True:\n            scores_matrix = scores_tensor.view(len(user_len_list), -1)\n        else:\n            scores_list = torch.split(scores_tensor, user_len_list, dim=0)\n            scores_matrix = pad_sequence(scores_list, batch_first=True, padding_value=-np.inf)  # nusers x items\n\n        # get topk\n        _, topk_index = torch.topk(scores_matrix, max(self.topk), dim=-1)  # nusers x k\n\n        return topk_index\n\n    def evaluate(self, batch_matrix_list, eval_data, is_test=False, idx=0):\n        \"\"\"calculate the metrics of all batches. It is called at the end of each epoch\n\n        Args:\n            batch_matrix_list (list): the results of all batches\n            eval_data (Dataset): the class of test data\n            is_test: in testing?\n\n        Returns:\n            dict: such as ``{'Hit@20': 0.3824, 'Recall@20': 0.0527, 'Hit@10': 0.3153, 'Recall@10': 0.0329}``\n\n        \"\"\"\n        pos_items = eval_data.get_eval_items()\n        pos_len_list = eval_data.get_eval_len_list()\n        topk_index = torch.cat(batch_matrix_list, dim=0).cpu().numpy()\n        # if save recommendation result?\n        if self.save_recom_result and is_test:\n            dataset_name = self.config['dataset']\n            model_name = self.config['model']\n            max_k = max(self.topk)\n            dir_name = os.path.abspath(self.config['recommend_topk'])\n            if not os.path.exists(dir_name):\n                os.makedirs(dir_name)\n            file_path = os.path.join(dir_name, '{}-{}-idx{}-top{}-{}.csv'.format(\n                model_name, dataset_name, idx, max_k, get_local_time()))\n            x_df = pd.DataFrame(topk_index)\n            x_df.insert(0, 'id', eval_data.get_eval_users())\n            x_df.columns = ['id']+['top_'+str(i) for i in range(max_k)]\n            x_df = x_df.astype(int)\n            x_df.to_csv(file_path, sep='\\t', 
index=False)\n        assert len(pos_len_list) == len(topk_index)\n        # if recom right?\n        bool_rec_matrix = []\n        for m, n in zip(pos_items, topk_index):\n            bool_rec_matrix.append([True if i in m else False for i in n])\n        bool_rec_matrix = np.asarray(bool_rec_matrix)\n\n        # get metrics\n        metric_dict = {}\n        result_list = self._calculate_metrics(pos_len_list, bool_rec_matrix)\n        for metric, value in zip(self.metrics, result_list):\n            for k in self.topk:\n                key = '{}@{}'.format(metric, k)\n                metric_dict[key] = round(value[k - 1], 4)\n        return metric_dict\n\n    def _check_args(self):\n        # Check metrics\n        if isinstance(self.metrics, (str, list)):\n            if isinstance(self.metrics, str):\n                self.metrics = [self.metrics]\n        else:\n            raise TypeError('metrics must be str or list')\n\n        # Convert metric to lowercase\n        for m in self.metrics:\n            if m.lower() not in topk_metrics:\n                raise ValueError(\"There is no user grouped topk metric named {}!\".format(m))\n        self.metrics = [metric.lower() for metric in self.metrics]\n\n        # Check topk:\n        if isinstance(self.topk, (int, list)):\n            if isinstance(self.topk, int):\n                self.topk = [self.topk]\n            for topk in self.topk:\n                if topk <= 0:\n                    raise ValueError(\n                        'topk must be a positive integer or a list of positive integers, but get `{}`'.format(topk))\n        else:\n            raise TypeError('The topk must be a integer, list')\n\n    def _calculate_metrics(self, pos_len_list, topk_index):\n        \"\"\"integrate the results of each batch and evaluate the topk metrics by users\n\n        Args:\n            pos_len_list (list): a list of users' positive items\n            topk_index (np.ndarray): a matrix which contains the index of the 
topk items for users\n        Returns:\n            np.ndarray: a matrix which contains the metrics result\n        \"\"\"\n        result_list = []\n        for metric in self.metrics:\n            metric_fuc = metrics_dict[metric.lower()]\n            result = metric_fuc(topk_index, pos_len_list)\n            result_list.append(result)\n        return np.stack(result_list, axis=0)\n\n    def __str__(self):\n        mesg = 'The TopK Evaluator Info:\\n' + '\\tMetrics:[' + ', '.join(\n            [topk_metrics[metric.lower()] for metric in self.metrics]) \\\n               + '], TopK:[' + ', '.join(map(str, self.topk)) + ']'\n        return mesg\n"
  },
  {
    "path": "src/utils/utils.py",
    "content": "# coding: utf-8\n# @email  : enoche.chow@gmail.com\n\n\"\"\"\nUtility functions\n##########################\n\"\"\"\n\nimport numpy as np\nimport torch\nimport importlib\nimport datetime\nimport random\n\n\ndef get_local_time():\n    r\"\"\"Get current time\n\n    Returns:\n        str: current time\n    \"\"\"\n    cur = datetime.datetime.now()\n    cur = cur.strftime('%b-%d-%Y-%H-%M-%S')\n\n    return cur\n\n\ndef get_model(model_name):\n    r\"\"\"Automatically select model class based on model name\n    Args:\n        model_name (str): model name\n    Returns:\n        Recommender: model class\n    \"\"\"\n    model_file_name = model_name.lower()\n    module_path = '.'.join(['models', model_file_name])\n    if importlib.util.find_spec(module_path, __name__):\n        model_module = importlib.import_module(module_path, __name__)\n\n    model_class = getattr(model_module, model_name)\n    return model_class\n\n\ndef get_trainer():\n    return getattr(importlib.import_module('common.trainer'), 'Trainer')\n\n\ndef init_seed(seed):\n    random.seed(seed)\n    np.random.seed(seed)\n    if torch.cuda.is_available():\n        torch.cuda.manual_seed(seed)\n        torch.cuda.manual_seed_all(seed)\n    torch.manual_seed(seed)\n\n\ndef early_stopping(value, best, cur_step, max_step, bigger=True):\n    r\"\"\" validation-based early stopping\n\n    Args:\n        value (float): current result\n        best (float): best result\n        cur_step (int): the number of consecutive steps that did not exceed the best result\n        max_step (int): threshold steps for stopping\n        bigger (bool, optional): whether the bigger the better\n\n    Returns:\n        tuple:\n        - float,\n          best result after this step\n        - int,\n          the number of consecutive steps that did not exceed the best result after this step\n        - bool,\n          whether to stop\n        - bool,\n          whether to update\n    \"\"\"\n    stop_flag = False\n    
update_flag = False\n    if bigger:\n        if value > best:\n            cur_step = 0\n            best = value\n            update_flag = True\n        else:\n            cur_step += 1\n            if cur_step > max_step:\n                stop_flag = True\n    else:\n        if value < best:\n            cur_step = 0\n            best = value\n            update_flag = True\n        else:\n            cur_step += 1\n            if cur_step > max_step:\n                stop_flag = True\n    return best, cur_step, stop_flag, update_flag\n\n\ndef dict2str(result_dict):\n    r\"\"\" convert result dict to str\n\n    Args:\n        result_dict (dict): result dict\n\n    Returns:\n        str: result str\n    \"\"\"\n\n    result_str = ''\n    for metric, value in result_dict.items():\n        result_str += str(metric) + ': ' + '%.04f' % value + '    '\n    return result_str\n\n\n############ LATTICE Utilities #########\n\ndef build_knn_neighbourhood(adj, topk):\n    knn_val, knn_ind = torch.topk(adj, topk, dim=-1)\n    weighted_adjacency_matrix = (torch.zeros_like(adj)).scatter_(-1, knn_ind, knn_val)\n    return weighted_adjacency_matrix\n\n\ndef compute_normalized_laplacian(adj):\n    rowsum = torch.sum(adj, -1)\n    d_inv_sqrt = torch.pow(rowsum, -0.5)\n    d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.\n    d_mat_inv_sqrt = torch.diagflat(d_inv_sqrt)\n    L_norm = torch.mm(torch.mm(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)\n    return L_norm\n\n\ndef build_sim(context):\n    context_norm = context.div(torch.norm(context, p=2, dim=-1, keepdim=True))\n    sim = torch.mm(context_norm, context_norm.transpose(1, 0))\n    return sim\n\ndef get_sparse_laplacian(edge_index, edge_weight, num_nodes, normalization='none'):\n    from torch_scatter import scatter_add\n    row, col = edge_index[0], edge_index[1]\n    deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)\n\n    if normalization == 'sym':\n        deg_inv_sqrt = deg.pow_(-0.5)\n        
deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)\n        edge_weight = deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]\n    elif normalization == 'rw':\n        deg_inv = 1.0 / deg\n        deg_inv.masked_fill_(deg_inv == float('inf'), 0)\n        edge_weight = deg_inv[row] * edge_weight\n    return edge_index, edge_weight\n\ndef get_dense_laplacian(adj, normalization='none'):\n    if normalization == 'sym':\n        rowsum = torch.sum(adj, -1)\n        d_inv_sqrt = torch.pow(rowsum, -0.5)\n        d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.\n        d_mat_inv_sqrt = torch.diagflat(d_inv_sqrt)\n        L_norm = torch.mm(torch.mm(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)\n    elif normalization == 'rw':\n        rowsum = torch.sum(adj, -1)\n        d_inv = torch.pow(rowsum, -1)\n        d_inv[torch.isinf(d_inv)] = 0.\n        d_mat_inv = torch.diagflat(d_inv)\n        L_norm = torch.mm(d_mat_inv, adj)\n    elif normalization == 'none':\n        L_norm = adj\n    return L_norm\n\ndef build_knn_normalized_graph(adj, topk, is_sparse, norm_type):\n    device = adj.device\n    knn_val, knn_ind = torch.topk(adj, topk, dim=-1)\n    if is_sparse:\n        tuple_list = [[row, int(col)] for row in range(len(knn_ind)) for col in knn_ind[row]]\n        row = [i[0] for i in tuple_list]\n        col = [i[1] for i in tuple_list]\n        i = torch.LongTensor([row, col]).to(device)\n        v = knn_val.flatten()\n        edge_index, edge_weight = get_sparse_laplacian(i, v, normalization=norm_type, num_nodes=adj.shape[0])\n        return torch.sparse_coo_tensor(edge_index, edge_weight, adj.shape)\n    else:\n        weighted_adjacency_matrix = (torch.zeros_like(adj)).scatter_(-1, knn_ind, knn_val)\n        return get_dense_laplacian(weighted_adjacency_matrix, normalization=norm_type)"
  }
]