[
  {
    "path": ".gitignore",
    "content": "# https://github.com/github/gitignore/blob/main/Python.gitignore\r\n\r\n# Byte-compiled / optimized / DLL files\r\n__pycache__/\r\n*.py[cod]\r\n*$py.class\r\n\r\n# C extensions\r\n*.so\r\n\r\n# Distribution / packaging\r\n.Python\r\nbuild/\r\ndevelop-eggs/\r\ndist/\r\ndownloads/\r\neggs/\r\n.eggs/\r\nlib/\r\nlib64/\r\nparts/\r\nsdist/\r\nvar/\r\nwheels/\r\nshare/python-wheels/\r\n*.egg-info/\r\n.installed.cfg\r\n*.egg\r\nMANIFEST\r\n\r\n# PyInstaller\r\n#  Usually these files are written by a python script from a template\r\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\r\n*.manifest\r\n*.spec\r\n\r\n# Installer logs\r\npip-log.txt\r\npip-delete-this-directory.txt\r\n\r\n# Unit test / coverage reports\r\nhtmlcov/\r\n.tox/\r\n.nox/\r\n.coverage\r\n.coverage.*\r\n.cache\r\nnosetests.xml\r\ncoverage.xml\r\n*.cover\r\n*.py,cover\r\n.hypothesis/\r\n.pytest_cache/\r\ncover/\r\n\r\n# Translations\r\n*.mo\r\n*.pot\r\n\r\n# Django stuff:\r\n*.log\r\nlocal_settings.py\r\ndb.sqlite3\r\ndb.sqlite3-journal\r\n\r\n# Flask stuff:\r\ninstance/\r\n.webassets-cache\r\n\r\n# Scrapy stuff:\r\n.scrapy\r\n\r\n# Sphinx documentation\r\ndocs/_build/\r\n\r\n# PyBuilder\r\n.pybuilder/\r\ntarget/\r\n\r\n# Jupyter Notebook\r\n.ipynb_checkpoints\r\n\r\n# IPython\r\nprofile_default/\r\nipython_config.py\r\n\r\n# pyenv\r\n#   For a library or package, you might want to ignore these files since the code is\r\n#   intended to run in multiple environments; otherwise, check them in:\r\n# .python-version\r\n\r\n# pipenv\r\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\r\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\r\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\r\n#   install all needed dependencies.\r\n#Pipfile.lock\r\n\r\n# poetry\r\n#   Similar to Pipfile.lock, it is generally recommended to include 
poetry.lock in version control.\r\n#   This is especially recommended for binary packages to ensure reproducibility, and is more\r\n#   commonly ignored for libraries.\r\n#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control\r\n#poetry.lock\r\n\r\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow\r\n__pypackages__/\r\n\r\n# Celery stuff\r\ncelerybeat-schedule\r\ncelerybeat.pid\r\n\r\n# SageMath parsed files\r\n*.sage.py\r\n\r\n# Environments\r\n.env\r\n.venv\r\nenv/\r\nvenv/\r\nENV/\r\nenv.bak/\r\nvenv.bak/\r\n\r\n# Spyder project settings\r\n.spyderproject\r\n.spyproject\r\n\r\n# Rope project settings\r\n.ropeproject\r\n\r\n# mkdocs documentation\r\n/site\r\n\r\n# mypy\r\n.mypy_cache/\r\n.dmypy.json\r\ndmypy.json\r\n\r\n# Pyre type checker\r\n.pyre/\r\n\r\n# pytype static type analyzer\r\n.pytype/\r\n\r\n# Cython debug symbols\r\ncython_debug/\r\n\r\n# PyCharm\r\n#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can\r\n#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore\r\n#  and can be added to the global gitignore or merged into this file.  For a more nuclear\r\n#  option (not recommended) you can uncomment the following to ignore the entire idea folder.\r\n#.idea/"
  },
  {
    "path": "DMCP.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nfrom Models.RFR import RFR\r\nfrom Models.KRR import KRR\r\nfrom Models.GBR import GBR\r\nfrom Models.KNR import KNR\r\nfrom Models.FNN import FNN\r\nfrom Models.SVR import SVR\r\nfrom Models.Lasso import LSO\r\nfrom Models.ENR import ENR\r\nfrom Models.GPR import GPR\r\nfrom Models.ETR import ETR\r\nfrom Models.MLP import MLP\r\nfrom Visualization.Violin import plot_Violin\r\nfrom Visualization.bar import plot_bar\r\nfrom Visualization.scatter import plot_scatter\r\nfrom Visualization.pearson import plot_pearson\r\nfrom Visualization.pie import plot_pie\r\nimport os\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split, cross_val_score\r\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler, Normalizer\r\nimport f90nml\r\nfrom multidict import CIMultiDict\r\nfrom statistics import mean\r\n\r\ninput_file = f90nml.read('DMCP_input_file')\r\ndata = CIMultiDict(input_file['data'])\r\ngeneral = CIMultiDict(input_file['general'])\r\nvisualization = CIMultiDict(input_file['visualization'])\r\n\r\n\r\ndef parse_data():\r\n    # intrn\r\n    if 'intrn' not in data.keys():\r\n        print('No train data file')\r\n    else:\r\n        data_file = data['intrn']\r\n        data_train = np.loadtxt(data_file, delimiter=\",\", dtype=\"float\")\r\n        # grept\r\n        if 'grept' not in general.keys():\r\n            iteration = 1\r\n        else:\r\n            iteration = general['grept']\r\n\r\n        train_set_RMSE = {}\r\n        train_set_R2 = {}\r\n        test_set_RMSE = {}\r\n        test_set_R2 = {}\r\n        estimator_dict = {}\r\n        for i in 
range(iteration):\r\n            x = preprocessing_data(data_train)\r\n            x = add_noise(x)\r\n            y = data_train[..., -1]\r\n            ##psplt\r\n            if 'psplt' not in general.keys():\r\n                test_size = 0.2\r\n            else:\r\n                test_size = 1 - general['psplt']\r\n            x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_size, random_state=16)\r\n\r\n            # model_load\r\n            # gmodl\r\n            if 'gmodl' not in general:\r\n                print(\"Not choose model\")\r\n            else:\r\n                model_list = general['gmodl']\r\n                if type(model_list) is str:\r\n                    model_list = [model_list]\r\n                for model in model_list:\r\n                    param = get_params(model)\r\n                    model_obj = eval(model)\r\n                    ML_model = model_obj()\r\n                    if 'modpr' in general:\r\n                        if general['modpr'] == 'ON':\r\n                            ML_model.auto_tune_params(x_train, y_train)\r\n                    ML_model.modify_params(param)\r\n                    ML_model.build_model()\r\n                    # gcrva\r\n                    if 'gcrva' in general.keys():\r\n                        if general['gcrva'] == 'ON':\r\n                            train_rmse, train_r2, test_rmse, test_r2, estimator = ML_model.model_evaluate(x, y, general[\r\n                                'gcvrn'])\r\n                            if i == 0:\r\n                                train_set_RMSE[model] = [train_rmse]\r\n                                train_set_R2[model] = [train_r2]\r\n                                test_set_RMSE[model] = [test_rmse]\r\n                                test_set_R2[model] = [test_r2]\r\n                                estimator_dict[model] = [estimator]\r\n                            else:\r\n                                
train_set_RMSE[model].append(train_rmse)\r\n                                train_set_R2[model].append(train_r2)\r\n                                test_set_RMSE[model].append(test_rmse)\r\n                                test_set_R2[model].append(test_r2)\r\n                                estimator_dict[model].append(estimator)\r\n                    # ML_model.calculate(x_train, x_test, y_train, y_test)\r\n        result_visualize(train_set_RMSE, train_set_R2, test_set_RMSE, test_set_R2)\r\n        optimal_model, optimal_model_name = choose_optimal_model(train_set_RMSE, estimator_dict)\r\n        predict(optimal_model, optimal_model_name)\r\n\r\n\r\ndef predict(optimal_model, optimal_model_name):\r\n    if 'intrn' not in data.keys():\r\n        print('No train data file')\r\n    else:\r\n        data_file = data['intrn']\r\n        data_train = np.loadtxt(data_file, delimiter=\",\", dtype=\"float\")\r\n        x = preprocessing_data(data_train)\r\n        x = add_noise(x)\r\n        y = data_train[..., -1]\r\n        ##psplt\r\n        if 'psplt' not in general.keys():\r\n            test_size = 0.2\r\n        else:\r\n            test_size = 1 - general['psplt']\r\n        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_size, random_state=16)\r\n        y_train_true, y_train_pred = y_train, optimal_model.predict(x_train)\r\n        y_test_true, y_test_pred = y_test, optimal_model.predict(x_test)\r\n        plot_scatter(y_train_true, y_train_pred, y_test_true, y_test_pred)\r\n        plot_pearson(optimal_model_name, x_train)\r\n        if optimal_model_name in ['GBR', 'RFR', 'ETR']:\r\n            plot_pie(optimal_model_name, optimal_model.feature_importances_)\r\n\r\n\r\ndef choose_optimal_model(model_evaluate_data, estimator_dict):\r\n    if 'fmodl' in visualization:\r\n        optimal_model_name = visualization['fmodl']\r\n    else:\r\n        model_list = []\r\n        data_list = []\r\n        for key in 
model_evaluate_data.keys():\r\n            model_list.append(key)\r\n            data_list.append(mean(model_evaluate_data[key]))\r\n        optimal_model_name = model_list[data_list.index(min(data_list))]\r\n    optmal_index = model_evaluate_data[optimal_model_name].index(min(model_evaluate_data[optimal_model_name]))\r\n    optimal_model = estimator_dict[optimal_model_name][optmal_index][0]\r\n    return optimal_model, optimal_model_name\r\n\r\n\r\n# def parse_DMCP_input_file():\r\n# input_file = f90nml.read('DMCP_input_file')\r\n# return input_file\r\n\r\ndef result_visualize(train_set_RMSE, train_set_R2, test_set_RMSE, test_set_R2):\r\n    if 'vvoln' in visualization.keys():\r\n        if visualization['vvoln'] == 'ON':\r\n            plot_Violin('RMSE', test_set_RMSE)\r\n            plot_Violin('R2', test_set_R2)\r\n    if 'vcomp' in visualization.keys():\r\n        if visualization['vcomp'] == 'ON':\r\n            plot_bar('DMCP', train_set_RMSE, train_set_R2, test_set_RMSE, test_set_R2)\r\n\r\n\r\ndef preprocessing_data(data_train):\r\n    # pscal\r\n    if 'pscal' not in general.keys():\r\n        X = data_train[..., 0:(data_train.shape[1] - 1)]\r\n    else:\r\n        if general['pscal'] == 'OFF':\r\n            X = data_train[..., 0:(data_train.shape[1] - 1)]\r\n        elif general['pscal'] == 'NOR':\r\n            scaler = MinMaxScaler()\r\n            X = scaler.fit_transform(data_train[..., 0:(data_train.shape[1] - 1)])\r\n        elif general['pscal'] == 'STA':\r\n            scaler = StandardScaler()\r\n            X = scaler.fit_transform(data_train[..., 0:(data_train.shape[1] - 1)])\r\n        else:\r\n            scaler = Normalizer(norm='l2')\r\n            X = scaler.fit_transform(data_train[..., 0:(data_train.shape[1] - 1)])\r\n    return X\r\n\r\n\r\ndef add_noise(X):\r\n    # pnose\r\n    if 'pnose' not in general.keys():\r\n        X = X\r\n    else:\r\n        scale = general['pnose']\r\n        x_noise = np.random.normal(loc=0.0, 
scale=scale, size=X.shape)\r\n        X = X + x_noise\r\n    return X\r\n\r\n\r\ndef get_params(model):\r\n    if ('PR' + model) not in general.keys():\r\n        param = {}\r\n        print(\"Not set\" + 'PR' + model)\r\n    else:\r\n        param = general['PR' + model]\r\n        param_key = param[1:(len(param) - 1):3]\r\n        param_val = param[3:(len(param) - 1):3]\r\n        param = dict(zip(param_key, param_val))\r\n    return param\r\n\r\n\r\ndef main():\r\n    parse_data()\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n"
  },
  {
    "path": "LICENSE",
    "content": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 2, June 1991\n\n Copyright (C) 1989, 1991 Free Software Foundation, Inc.,\n 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The licenses for most software are designed to take away your\nfreedom to share and change it.  By contrast, the GNU General Public\nLicense is intended to guarantee your freedom to share and change free\nsoftware--to make sure the software is free for all its users.  This\nGeneral Public License applies to most of the Free Software\nFoundation's software and to any other program whose authors commit to\nusing it.  (Some other Free Software Foundation software is covered by\nthe GNU Lesser General Public License instead.)  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthis service if you wish), that you receive source code or can get it\nif you want it, that you can change the software or use pieces of it\nin new free programs; and that you know you can do these things.\n\n  To protect your rights, we need to make restrictions that forbid\nanyone to deny you these rights or to ask you to surrender the rights.\nThese restrictions translate to certain responsibilities for you if you\ndistribute copies of the software, or if you modify it.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must give the recipients all the rights that\nyou have.  You must make sure that they, too, receive or can get the\nsource code.  
And you must show them these terms so they know their\nrights.\n\n  We protect your rights with two steps: (1) copyright the software, and\n(2) offer you this license which gives you legal permission to copy,\ndistribute and/or modify the software.\n\n  Also, for each author's protection and ours, we want to make certain\nthat everyone understands that there is no warranty for this free\nsoftware.  If the software is modified by someone else and passed on, we\nwant its recipients to know that what they have is not the original, so\nthat any problems introduced by others will not reflect on the original\nauthors' reputations.\n\n  Finally, any free program is threatened constantly by software\npatents.  We wish to avoid the danger that redistributors of a free\nprogram will individually obtain patent licenses, in effect making the\nprogram proprietary.  To prevent this, we have made it clear that any\npatent must be licensed for everyone's free use or not licensed at all.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                    GNU GENERAL PUBLIC LICENSE\n   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n  0. This License applies to any program or other work which contains\na notice placed by the copyright holder saying it may be distributed\nunder the terms of this General Public License.  The \"Program\", below,\nrefers to any such program or work, and a \"work based on the Program\"\nmeans either the Program or any derivative work under copyright law:\nthat is to say, a work containing the Program or a portion of it,\neither verbatim or with modifications and/or translated into another\nlanguage.  (Hereinafter, translation is included without limitation in\nthe term \"modification\".)  Each licensee is addressed as \"you\".\n\nActivities other than copying, distribution and modification are not\ncovered by this License; they are outside its scope.  
The act of\nrunning the Program is not restricted, and the output from the Program\nis covered only if its contents constitute a work based on the\nProgram (independent of having been made by running the Program).\nWhether that is true depends on what the Program does.\n\n  1. You may copy and distribute verbatim copies of the Program's\nsource code as you receive it, in any medium, provided that you\nconspicuously and appropriately publish on each copy an appropriate\ncopyright notice and disclaimer of warranty; keep intact all the\nnotices that refer to this License and to the absence of any warranty;\nand give any other recipients of the Program a copy of this License\nalong with the Program.\n\nYou may charge a fee for the physical act of transferring a copy, and\nyou may at your option offer warranty protection in exchange for a fee.\n\n  2. You may modify your copy or copies of the Program or any portion\nof it, thus forming a work based on the Program, and copy and\ndistribute such modifications or work under the terms of Section 1\nabove, provided that you also meet all of these conditions:\n\n    a) You must cause the modified files to carry prominent notices\n    stating that you changed the files and the date of any change.\n\n    b) You must cause any work that you distribute or publish, that in\n    whole or in part contains or is derived from the Program or any\n    part thereof, to be licensed as a whole at no charge to all third\n    parties under the terms of this License.\n\n    c) If the modified program normally reads commands interactively\n    when run, you must cause it, when started running for such\n    interactive use in the most ordinary way, to print or display an\n    announcement including an appropriate copyright notice and a\n    notice that there is no warranty (or else, saying that you provide\n    a warranty) and that users may redistribute the program under\n    these conditions, and telling the user how to view a copy of this\n  
  License.  (Exception: if the Program itself is interactive but\n    does not normally print such an announcement, your work based on\n    the Program is not required to print an announcement.)\n\nThese requirements apply to the modified work as a whole.  If\nidentifiable sections of that work are not derived from the Program,\nand can be reasonably considered independent and separate works in\nthemselves, then this License, and its terms, do not apply to those\nsections when you distribute them as separate works.  But when you\ndistribute the same sections as part of a whole which is a work based\non the Program, the distribution of the whole must be on the terms of\nthis License, whose permissions for other licensees extend to the\nentire whole, and thus to each and every part regardless of who wrote it.\n\nThus, it is not the intent of this section to claim rights or contest\nyour rights to work written entirely by you; rather, the intent is to\nexercise the right to control the distribution of derivative or\ncollective works based on the Program.\n\nIn addition, mere aggregation of another work not based on the Program\nwith the Program (or with a work based on the Program) on a volume of\na storage or distribution medium does not bring the other work under\nthe scope of this License.\n\n  3. 
You may copy and distribute the Program (or a work based on it,\nunder Section 2) in object code or executable form under the terms of\nSections 1 and 2 above provided that you also do one of the following:\n\n    a) Accompany it with the complete corresponding machine-readable\n    source code, which must be distributed under the terms of Sections\n    1 and 2 above on a medium customarily used for software interchange; or,\n\n    b) Accompany it with a written offer, valid for at least three\n    years, to give any third party, for a charge no more than your\n    cost of physically performing source distribution, a complete\n    machine-readable copy of the corresponding source code, to be\n    distributed under the terms of Sections 1 and 2 above on a medium\n    customarily used for software interchange; or,\n\n    c) Accompany it with the information you received as to the offer\n    to distribute corresponding source code.  (This alternative is\n    allowed only for noncommercial distribution and only if you\n    received the program in object code or executable form with such\n    an offer, in accord with Subsection b above.)\n\nThe source code for a work means the preferred form of the work for\nmaking modifications to it.  For an executable work, complete source\ncode means all the source code for all modules it contains, plus any\nassociated interface definition files, plus the scripts used to\ncontrol compilation and installation of the executable.  
However, as a\nspecial exception, the source code distributed need not include\nanything that is normally distributed (in either source or binary\nform) with the major components (compiler, kernel, and so on) of the\noperating system on which the executable runs, unless that component\nitself accompanies the executable.\n\nIf distribution of executable or object code is made by offering\naccess to copy from a designated place, then offering equivalent\naccess to copy the source code from the same place counts as\ndistribution of the source code, even though third parties are not\ncompelled to copy the source along with the object code.\n\n  4. You may not copy, modify, sublicense, or distribute the Program\nexcept as expressly provided under this License.  Any attempt\notherwise to copy, modify, sublicense or distribute the Program is\nvoid, and will automatically terminate your rights under this License.\nHowever, parties who have received copies, or rights, from you under\nthis License will not have their licenses terminated so long as such\nparties remain in full compliance.\n\n  5. You are not required to accept this License, since you have not\nsigned it.  However, nothing else grants you permission to modify or\ndistribute the Program or its derivative works.  These actions are\nprohibited by law if you do not accept this License.  Therefore, by\nmodifying or distributing the Program (or any work based on the\nProgram), you indicate your acceptance of this License to do so, and\nall its terms and conditions for copying, distributing or modifying\nthe Program or works based on it.\n\n  6. Each time you redistribute the Program (or any work based on the\nProgram), the recipient automatically receives a license from the\noriginal licensor to copy, distribute or modify the Program subject to\nthese terms and conditions.  
You may not impose any further\nrestrictions on the recipients' exercise of the rights granted herein.\nYou are not responsible for enforcing compliance by third parties to\nthis License.\n\n  7. If, as a consequence of a court judgment or allegation of patent\ninfringement or for any other reason (not limited to patent issues),\nconditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot\ndistribute so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you\nmay not distribute the Program at all.  For example, if a patent\nlicense would not permit royalty-free redistribution of the Program by\nall those who receive copies directly or indirectly through you, then\nthe only way you could satisfy both it and this License would be to\nrefrain entirely from distribution of the Program.\n\nIf any portion of this section is held invalid or unenforceable under\nany particular circumstance, the balance of the section is intended to\napply and the section as a whole is intended to apply in other\ncircumstances.\n\nIt is not the purpose of this section to induce you to infringe any\npatents or other property right claims or to contest validity of any\nsuch claims; this section has the sole purpose of protecting the\nintegrity of the free software distribution system, which is\nimplemented by public license practices.  Many people have made\ngenerous contributions to the wide range of software distributed\nthrough that system in reliance on consistent application of that\nsystem; it is up to the author/donor to decide if he or she is willing\nto distribute software through any other system and a licensee cannot\nimpose that choice.\n\nThis section is intended to make thoroughly clear what is believed to\nbe a consequence of the rest of this License.\n\n  8. 
If the distribution and/or use of the Program is restricted in\ncertain countries either by patents or by copyrighted interfaces, the\noriginal copyright holder who places the Program under this License\nmay add an explicit geographical distribution limitation excluding\nthose countries, so that distribution is permitted only in or among\ncountries not thus excluded.  In such case, this License incorporates\nthe limitation as if written in the body of this License.\n\n  9. The Free Software Foundation may publish revised and/or new versions\nof the General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\nEach version is given a distinguishing version number.  If the Program\nspecifies a version number of this License which applies to it and \"any\nlater version\", you have the option of following the terms and conditions\neither of that version or of any later version published by the Free\nSoftware Foundation.  If the Program does not specify a version number of\nthis License, you may choose any version ever published by the Free Software\nFoundation.\n\n  10. If you wish to incorporate parts of the Program into other free\nprograms whose distribution conditions are different, write to the author\nto ask for permission.  For software which is copyrighted by the Free\nSoftware Foundation, write to the Free Software Foundation; we sometimes\nmake exceptions for this.  Our decision will be guided by the two goals\nof preserving the free status of all derivatives of our free software and\nof promoting the sharing and reuse of software generally.\n\n                            NO WARRANTY\n\n  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY\nFOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  
EXCEPT WHEN\nOTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES\nPROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED\nOR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS\nTO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE\nPROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,\nREPAIR OR CORRECTION.\n\n  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR\nREDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,\nINCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING\nOUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED\nTO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY\nYOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER\nPROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGES.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  
It is safest\nto attach them to the start of each source file to most effectively\nconvey the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software; you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation; either version 2 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License along\n    with this program; if not, write to the Free Software Foundation, Inc.,\n    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nAlso add information on how to contact you by electronic and paper mail.\n\nIf the program is interactive, make it output a short notice like this\nwhen it starts in an interactive mode:\n\n    Gnomovision version 69, Copyright (C) year name of author\n    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  Of course, the commands you use may\nbe called something other than `show w' and `show c'; they could even be\nmouse-clicks or menu items--whatever suits your program.\n\nYou should also get your employer (if you work as a programmer) or your\nschool, if any, to sign a \"copyright disclaimer\" for the program, if\nnecessary.  
Here is a sample; alter the names:\n\n  Yoyodyne, Inc., hereby disclaims all copyright interest in the program\n  `Gnomovision' (which makes passes at compilers) written by James Hacker.\n\n  <signature of Ty Coon>, 1 April 1989\n  Ty Coon, President of Vice\n\nThis General Public License does not permit incorporating your program into\nproprietary programs.  If your program is a subroutine library, you may\nconsider it more useful to permit linking proprietary applications with the\nlibrary.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.\n"
  },
  {
    "path": "MANIFEST.in",
    "content": "include requirements.txt\n"
  },
  {
    "path": "Models/ENR.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport numpy as np\r\nfrom sklearn.linear_model import ElasticNet as enr\r\nfrom sklearn.metrics import mean_squared_error as mse\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.model_selection import train_test_split,cross_validate,GridSearchCV\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nclass ENR(object):\r\n    def __init__(self):\r\n        self.params_defualt = {'alpha': 1.0,\r\n                                 'fit_intercept': True,\r\n                                 'l1_ratio': 0.5,\r\n                                 'normalize': False,\r\n                                 'precompute': False,\r\n                                 'max_iter': 1000,\r\n                                 'tol': 1e-4,\r\n                                 'warm_start': False,\r\n                                 'positive': False,\r\n                                 'selection': 'cyclic',\r\n                                 'random_state': 2}\r\n        self.tuned_parameters = {'alpha': [1.0] ,\r\n                                'fit_intercept': [True],\r\n                                'l1_ratio':[0.5],\r\n                                'normalize': [False],\r\n                                'precompute': [False],\r\n                                'max_iter': [1000],\r\n                                'tol': [1e-4],\r\n                                'warm_start': [False],\r\n                                'positive': [False],\r\n                                'selection': ['cyclic'],\r\n                                'random_state': [2]}\r\n\r\n    def auto_tune_params(self, x_train, 
y_train):\r\n        #use RMSE as the scoring\r\n        clf = GridSearchCV(\r\n            enr(), self.tuned_parameters, scoring='neg_root_mean_squared_error'\r\n        )\r\n        clf.fit(x_train, y_train)\r\n\r\n        print(\"Best parameters set found on development set:\")\r\n        print()\r\n        print(clf.best_params_)\r\n        self.params_defualt = clf.best_params_\r\n\r\n    def modify_params(self, params):\r\n        for key in params:\r\n            self.params_defualt[key] = params[key]\r\n\r\n    def build_model(self):\r\n        self.model = enr(**self.params_defualt)\r\n\r\n    def model_evaluate(self, x, y, cv):\r\n        scoring = ['neg_root_mean_squared_error', 'r2']\r\n        scores = cross_validate(self.model, x, y, scoring=scoring, cv=cv, return_train_score=True,\r\n                                return_estimator=True)\r\n        self.estimator = scores['estimator']\r\n        return -scores['train_neg_root_mean_squared_error'].mean(), scores['train_r2'].mean(), -scores[\r\n            'test_neg_root_mean_squared_error'].mean(), scores['test_r2'].mean(), scores['estimator']\r\n\r\n    def calculate(self, x_train, x_test, y_train, y_test):\r\n        self.model.fit(x_train, y_train)\r\n        rmse = np.sqrt(mse(y_train, self.model.predict(x_train)))\r\n        r2 = r2_score(y_train, self.model.predict(x_train))\r\n        rmset = np.sqrt(mse(y_test, self.model.predict(x_test)))\r\n        r2t = r2_score(y_test, self.model.predict(x_test))\r\n        print('pre:', self.model.predict(x_test))\r\n        print(y_test)\r\n        print(rmse)\r\n        print(r2)\r\n        print(rmset)\r\n        print(r2t)\r\n        return r2\r\n"
  },
  {
    "path": "Models/ETR.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport numpy as np\r\nfrom sklearn.tree import ExtraTreeRegressor as etr\r\nfrom sklearn.metrics import mean_squared_error as mse\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.model_selection import train_test_split,cross_validate,GridSearchCV\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\n\r\nclass ETR(object):\r\n    def __init__(self):\r\n        self.params_defualt = {'criterion': 'mse',\r\n                                 'splitter': 'random',\r\n                                 'max_depth': None,\r\n                                 'min_samples_split': 2,\r\n                                 'min_samples_leaf': 1,\r\n                                 'min_weight_fraction_leaf': 0.0,\r\n                                 'max_features': 'auto',\r\n                                 'random_state': 16,\r\n                                 'min_impurity_decrease': 0,\r\n                                 'max_leaf_nodes': None,\r\n                                 'ccp_alpha': 0.0}\r\n        self.tuned_parameters = {'criterion': ['mse'],\r\n                                'splitter': ['random'],\r\n                                'max_depth': [None],\r\n                                'min_samples_split': [2],\r\n                                'min_samples_leaf': [1],\r\n                                'min_weight_fraction_leaf': [0.0],\r\n                                'max_features': ['auto'],\r\n                                'random_state': [16],\r\n                                'min_impurity_decrease': [0],\r\n                                'max_leaf_nodes': [None],\r\n          
                      'ccp_alpha': [0.0]}\r\n\r\n    def auto_tune_params(self, x_train, y_train):\r\n        # use RMSE as the scoring\r\n        clf = GridSearchCV(\r\n            etr(), self.tuned_parameters, scoring='neg_root_mean_squared_error'\r\n        )\r\n        clf.fit(x_train, y_train)\r\n\r\n        print(\"Best parameters set found on development set:\")\r\n        print()\r\n        print(clf.best_params_)\r\n        self.params_defualt = clf.best_params_\r\n\r\n    def modify_params(self, params):\r\n        for key in params:\r\n            self.params_defualt[key] = params[key]\r\n\r\n    def build_model(self):\r\n        self.model = etr(**self.params_defualt)\r\n\r\n    def model_evaluate(self, x, y, cv):\r\n        scoring = ['neg_root_mean_squared_error', 'r2']\r\n        scores = cross_validate(self.model, x, y, scoring=scoring, cv=cv, return_train_score=True,\r\n                                return_estimator=True)\r\n        self.estimator = scores['estimator']\r\n        return -scores['train_neg_root_mean_squared_error'].mean(), scores['train_r2'].mean(), -scores[\r\n            'test_neg_root_mean_squared_error'].mean(), scores['test_r2'].mean(), scores['estimator']\r\n\r\n    def calculate(self, x_train, x_test, y_train, y_test):\r\n        self.model.fit(x_train, y_train)\r\n        rmse = np.sqrt(mse(y_train, self.model.predict(x_train)))\r\n        r2 = r2_score(y_train, self.model.predict(x_train))\r\n        rmset = np.sqrt(mse(y_test, self.model.predict(x_test)))\r\n        r2t = r2_score(y_test, self.model.predict(x_test))\r\n        print('pre:', self.model.predict(x_test))\r\n        print(y_test)\r\n        print(rmse)\r\n        print(r2)\r\n        print(rmset)\r\n        print(r2t)\r\n        print(self.model.feature_importances_)\r\n        return r2\r\n"
  },
  {
    "path": "Models/FNN.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport torch\r\nfrom torch import nn\r\nimport torch.nn.functional as F\r\nimport torch.utils.data as Data\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport numpy as np\r\nfrom torchvision import datasets, transforms\r\nfrom torch.nn import init\r\nfrom sklearn.metrics import mean_squared_error as mse\r\nfrom sklearn.metrics import r2_score\r\n\r\n\r\nclass FNN(object):\r\n    def __init__(self, x_train, x_test, y_train, y_test, params, model_evaluation, x, y):\r\n        self.x = x\r\n        self.y = y\r\n        self.x_train = torch.from_numpy(x_train).type(torch.FloatTensor)\r\n        y_train = torch.from_numpy(y_train).type(torch.FloatTensor)\r\n        self.x_test = torch.from_numpy(x_test).type(torch.FloatTensor)\r\n        y_test = torch.from_numpy(y_test).type(torch.FloatTensor)\r\n        self.y_test = torch.unsqueeze(y_test, 1)\r\n        self.y_train = torch.unsqueeze(y_train, 1)\r\n        self.params_defualt = {'BATCH_SIZE': 32,\r\n                                'LR': 0.05,\r\n                                'EPOCH': 50}\r\n        self.params_modify = params\r\n        self.modify_params()\r\n        torch_data = Data.TensorDataset(self.x_train, self.y_train)\r\n        self.loader = Data.DataLoader(dataset=torch_data, batch_size=self.params_defualt['BATCH_SIZE'], shuffle=True)\r\n        self.calculate()\r\n\r\n    def modify_params(self):\r\n        for key in self.params_modify:\r\n            self.params_defualt[key] = self.params_modify[key]\r\n\r\n    def calculate(self):\r\n        adam_net = Net()\r\n\r\n        
opt_adam = torch.optim.Adam(adam_net.parameters(), lr=self.params_defualt['LR'])\r\n        loss_func = nn.MSELoss()\r\n\r\n        all_loss = {}\r\n        for epoch in range(self.params_defualt['EPOCH']):\r\n            print('epoch', epoch)\r\n            for step, (b_x, b_y) in enumerate(self.loader):\r\n                print('step', step)\r\n                pre = adam_net(b_x)\r\n                loss = loss_func(pre, b_y)\r\n                opt_adam.zero_grad()\r\n                loss.backward()\r\n                opt_adam.step()\r\n                all_loss[epoch + 1] = loss.item()\r\n        print(all_loss)\r\n\r\n        yt = self.y_train.numpy()\r\n        yp = adam_net(self.x_train)\r\n        yp = yp.detach().numpy()\r\n        rmse = np.sqrt(mse(yt, yp))\r\n        r2 = r2_score(yt, yp)\r\n        yt1 = self.y_test.numpy()\r\n        yp1 = adam_net(self.x_test)\r\n        yp1 = yp1.detach().numpy()\r\n        rmset = np.sqrt(mse(yt1, yp1))\r\n        r2t = r2_score(yt1, yp1)\r\n        print(rmse)\r\n        print(r2)\r\n        print(rmset)\r\n        print(r2t)\r\n\r\n\r\nclass Net(nn.Module):\r\n    def __init__(self):\r\n        super(Net, self).__init__()\r\n        self.hidden = nn.Linear(20, 32)\r\n        self.predict = nn.Linear(32, 1)\r\n\r\n    def forward(self, x):\r\n        x = F.relu(self.hidden(x))\r\n        x = self.predict(x)\r\n        return x\r\n\r\n\r\ndef weights_init(m):\r\n    if isinstance(m, nn.Linear):\r\n        init.kaiming_normal_(m.weight.data)\r\n\r\n\r\n\r\n"
  },
  {
    "path": "Models/GBR.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport numpy as np\r\nfrom sklearn.ensemble import GradientBoostingRegressor as gbr\r\nfrom sklearn.metrics import mean_squared_error as mse\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.model_selection import train_test_split, cross_validate, cross_val_score, GridSearchCV\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nclass GBR(object):\r\n    def __init__(self):\r\n        self.params_defualt = {'n_estimators': 500,\r\n                                  'max_depth': 5,\r\n                                  'min_samples_split': 5,\r\n                                  'learning_rate': 0.005,\r\n                                  'loss': 'huber'}\r\n        self.tuned_parameters ={'n_estimators': [500],\r\n                                'max_depth': [5],\r\n                                'min_samples_split': [5],\r\n                                'learning_rate': [0.005, 0.01],\r\n                                'loss': ['huber']}\r\n\r\n    def auto_tune_params(self, x_train, y_train):\r\n        #use RMSE as the scoring\r\n        clf = GridSearchCV(\r\n            gbr(), self.tuned_parameters, scoring='neg_root_mean_squared_error'\r\n        )\r\n        clf.fit(x_train, y_train)\r\n\r\n        print(\"Best parameters set found on development set:\")\r\n        print()\r\n        print(clf.best_params_)\r\n        self.params_defualt = clf.best_params_\r\n\r\n    def modify_params(self, params):\r\n        for key in params:\r\n            self.params_defualt[key] = params[key]\r\n\r\n    def build_model(self):\r\n        self.model = gbr(**self.params_defualt)\r\n\r\n    def 
model_evaluate(self, x, y, cv):\r\n        scoring = ['neg_root_mean_squared_error', 'r2']\r\n        scores = cross_validate(self.model, x, y, scoring=scoring, cv=cv, return_train_score=True, return_estimator=True)\r\n        self.estimator = scores['estimator']\r\n        return -scores['train_neg_root_mean_squared_error'].mean(), scores['train_r2'].mean(),-scores['test_neg_root_mean_squared_error'].mean(), scores['test_r2'].mean(), scores['estimator']\r\n        #scores1 = cross_val_score(self.model, x, y, cv=cv, scoring='neg_root_mean_squared_error')\r\n        #return scores1,scores1\r\n\r\n    def calculate(self, x_train, x_test, y_train, y_test):\r\n        return self.model.feature_importances_\r\n\r\n\r\n"
  },
  {
    "path": "Models/GPR.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport numpy as np\r\nfrom sklearn.gaussian_process import GaussianProcessRegressor as gpr\r\nfrom sklearn.gaussian_process.kernels import RBF, ConstantKernel\r\nfrom sklearn.metrics import mean_squared_error as mse\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.model_selection import train_test_split,cross_validate,GridSearchCV\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nclass GPR(object):\r\n    def __init__(self):\r\n        self.params_defualt = {}\r\n        self.tuned_parameters = {}\r\n\r\n    def auto_tune_params(self, x_train, y_train):\r\n        # use RMSE as the scoring\r\n        clf = GridSearchCV(\r\n            gpr(), self.tuned_parameters, scoring='neg_root_mean_squared_error'\r\n        )\r\n        clf.fit(x_train, y_train)\r\n\r\n        print(\"Best parameters set found on development set:\")\r\n        print()\r\n        print(clf.best_params_)\r\n        self.params_defualt = clf.best_params_\r\n\r\n    def modify_params(self, params):\r\n        for key in params:\r\n            self.params_defualt[key] = params[key]\r\n\r\n    def build_model(self):\r\n        self.model = gpr(**self.params_defualt)\r\n\r\n    def model_evaluate(self, x, y, cv):\r\n        scoring = ['neg_root_mean_squared_error', 'r2']\r\n        scores = cross_validate(self.model, x, y, scoring=scoring, cv=cv, return_train_score=True,\r\n                                return_estimator=True)\r\n        self.estimator = scores['estimator']\r\n        return -scores['train_neg_root_mean_squared_error'].mean(), scores['train_r2'].mean(), -scores[\r\n            
'test_neg_root_mean_squared_error'].mean(), scores['test_r2'].mean(), scores['estimator']\r\n\r\n    def calculate(self, x_train, x_test, y_train, y_test):\r\n        self.model.fit(x_train, y_train)\r\n        rmse = np.sqrt(mse(y_train, self.model.predict(x_train)))\r\n        r2 = r2_score(y_train, self.model.predict(x_train))\r\n        rmset = np.sqrt(mse(y_test, self.model.predict(x_test)))\r\n        r2t = r2_score(y_test, self.model.predict(x_test))\r\n        print('pre:', self.model.predict(x_test))\r\n        print(y_test)\r\n        print(rmse)\r\n        print(r2)\r\n        print(rmset)\r\n        print(r2t)\r\n        return r2\r\n"
  },
  {
    "path": "Models/KNR.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport numpy as np\r\nfrom sklearn.neighbors import KNeighborsRegressor as knr\r\nfrom sklearn.metrics import mean_squared_error as mse\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.model_selection import train_test_split,cross_validate,GridSearchCV\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\n\r\nclass KNR(object):\r\n    def __init__(self):\r\n        self.params_defualt = {'n_neighbors': 4}\r\n        self.tuned_parameters = {'n_neighbors': [4,5]}\r\n\r\n    def auto_tune_params(self, x_train, y_train):\r\n        # use RMSE as the scoring\r\n        clf = GridSearchCV(\r\n            knr(), self.tuned_parameters, scoring='neg_root_mean_squared_error'\r\n        )\r\n        clf.fit(x_train, y_train)\r\n\r\n        print(\"Best parameters set found on development set:\")\r\n        print()\r\n        print(clf.best_params_)\r\n        self.params_defualt = clf.best_params_\r\n\r\n    def modify_params(self, params):\r\n        for key in params:\r\n            self.params_defualt[key] = params[key]\r\n\r\n    def build_model(self):\r\n        self.model = knr(**self.params_defualt)\r\n\r\n    def model_evaluate(self, x, y, cv):\r\n        scoring = ['neg_root_mean_squared_error', 'r2']\r\n        scores = cross_validate(self.model, x, y, scoring=scoring, cv=cv, return_train_score=True,\r\n                                return_estimator=True)\r\n        self.estimator = scores['estimator']\r\n        return -scores['train_neg_root_mean_squared_error'].mean(), scores['train_r2'].mean(), -scores[\r\n            'test_neg_root_mean_squared_error'].mean(), scores['test_r2'].mean(), 
scores['estimator']\r\n\r\n    def calculate(self, x_train, x_test, y_train, y_test):\r\n        self.model.fit(x_train, y_train)\r\n        rmse = np.sqrt(mse(y_train, self.model.predict(x_train)))\r\n        r2 = r2_score(y_train, self.model.predict(x_train))\r\n        rmset = np.sqrt(mse(y_test, self.model.predict(x_test)))\r\n        r2t = r2_score(y_test, self.model.predict(x_test))\r\n        print('pre:', self.model.predict(x_test))\r\n        print(y_test)\r\n        print(rmse)\r\n        print(r2)\r\n        print(rmset)\r\n        print(r2t)\r\n        return r2\r\n"
  },
  {
    "path": "Models/KRR.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport numpy as np\r\nfrom sklearn.kernel_ridge import KernelRidge as krr\r\nfrom sklearn.metrics import mean_squared_error as mse\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.model_selection import train_test_split,cross_validate,GridSearchCV\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\n\r\nclass KRR(object):\r\n    def __init__(self):\r\n        self.params_defualt = {'alpha': 1,\r\n                                 'kernel': 'linear',\r\n                                 'gamma': 0,\r\n                                 'degree': 3,\r\n                                 'coef0': '1'}\r\n        self.tuned_parameters = {'alpha': [1,2],\r\n                                'kernel': ['linear'],\r\n                                'gamma': [0,0.1],\r\n                                'degree': [3,4],\r\n                                'coef0': ['1']}\r\n\r\n    def auto_tune_params(self, x_train, y_train):\r\n        #use RMSE as the scoring\r\n        clf = GridSearchCV(\r\n            krr(), self.tuned_parameters, scoring='neg_root_mean_squared_error'\r\n        )\r\n        clf.fit(x_train, y_train)\r\n\r\n        print(\"Best parameters set found on development set:\")\r\n        print()\r\n        print(clf.best_params_)\r\n        self.params_defualt = clf.best_params_\r\n\r\n    def modify_params(self, params):\r\n        for key in params:\r\n            self.params_defualt[key] = params[key]\r\n\r\n    def build_model(self):\r\n        self.model = krr(**self.params_defualt)\r\n\r\n    def model_evaluate(self, x, y, cv):\r\n        scoring = ['neg_root_mean_squared_error', 
'r2']\r\n        scores = cross_validate(self.model, x, y, scoring=scoring, cv=cv, return_train_score=True,\r\n                                return_estimator=True)\r\n        self.estimator = scores['estimator']\r\n        return -scores['train_neg_root_mean_squared_error'].mean(), scores['train_r2'].mean(), -scores[\r\n            'test_neg_root_mean_squared_error'].mean(), scores['test_r2'].mean(), scores['estimator']\r\n\r\n    def calculate(self, x_train, x_test, y_train, y_test):\r\n        self.model.fit(x_train, y_train)\r\n        rmse = np.sqrt(mse(y_train, self.model.predict(x_train)))\r\n        r2 = r2_score(y_train, self.model.predict(x_train))\r\n        rmset = np.sqrt(mse(y_test, self.model.predict(x_test)))\r\n        r2t = r2_score(y_test, self.model.predict(x_test))\r\n        print('pre:', self.model.predict(x_test))\r\n        print(y_test)\r\n        print(rmse)\r\n        print(r2)\r\n        print(rmset)\r\n        print(r2t)\r\n        return r2\r\n"
  },
  {
    "path": "Models/Lasso.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport numpy as np\r\nfrom sklearn.linear_model import Lasso as lso\r\nfrom sklearn.metrics import mean_squared_error as mse\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.model_selection import train_test_split,cross_validate,GridSearchCV\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\n\r\nclass LSO(object):\r\n    def __init__(self):\r\n        self.params_defualt = {'alpha': 1.0,\r\n                                 'fit_intercept': True,\r\n                                 'normalize': False,\r\n                                 'precompute': False,\r\n                                 'max_iter': 1000,\r\n                                 'tol': 1e-4,\r\n                                 'warm_start': False,\r\n                                 'positive': False,\r\n                                 'selection': 'cyclic',\r\n                                 'random_state': 8}\r\n        self.tuned_parameters = {'alpha': [1.0],\r\n                                'fit_intercept': [True],\r\n                                'normalize': [False],\r\n                                'precompute': [False],\r\n                                'max_iter': [1000],\r\n                                'tol': [1e-4],\r\n                                'warm_start': [False],\r\n                                'positive': [False],\r\n                                'selection': ['cyclic'],\r\n                                'random_state': [8]}\r\n\r\n    def auto_tune_params(self, x_train, y_train):\r\n        # use RMSE as the scoring\r\n        clf = GridSearchCV(\r\n            lso(), 
self.tuned_parameters, scoring='neg_root_mean_squared_error'\r\n        )\r\n        clf.fit(x_train, y_train)\r\n\r\n        print(\"Best parameters set found on development set:\")\r\n        print()\r\n        print(clf.best_params_)\r\n        self.params_defualt = clf.best_params_\r\n\r\n    def modify_params(self, params):\r\n        for key in params:\r\n            self.params_defualt[key] = params[key]\r\n\r\n    def build_model(self):\r\n        self.model = lso(**self.params_defualt)\r\n\r\n    def model_evaluate(self, x, y, cv):\r\n        scoring = ['neg_root_mean_squared_error', 'r2']\r\n        scores = cross_validate(self.model, x, y, scoring=scoring, cv=cv, return_train_score=True,\r\n                                return_estimator=True)\r\n        self.estimator = scores['estimator']\r\n        return -scores['train_neg_root_mean_squared_error'].mean(), scores['train_r2'].mean(), -scores[\r\n            'test_neg_root_mean_squared_error'].mean(), scores['test_r2'].mean(), scores['estimator']\r\n\r\n    def calculate(self, x_train, x_test, y_train, y_test):\r\n        self.model.fit(x_train, y_train)\r\n        rmse = np.sqrt(mse(y_train, self.model.predict(x_train)))\r\n        r2 = r2_score(y_train, self.model.predict(x_train))\r\n        rmset = np.sqrt(mse(y_test, self.model.predict(x_test)))\r\n        r2t = r2_score(y_test, self.model.predict(x_test))\r\n        print('pre:', self.model.predict(x_test))\r\n        print(y_test)\r\n        print(rmse)\r\n        print(r2)\r\n        print(rmset)\r\n        print(r2t)\r\n        return r2\r\n"
  },
  {
    "path": "Models/MLP.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport numpy as np\r\nfrom sklearn.neural_network import MLPRegressor as mlp\r\nfrom sklearn.metrics import mean_squared_error as mse\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.model_selection import train_test_split, cross_validate,GridSearchCV\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\n\r\nclass MLP(object):\r\n    def __init__(self):\r\n        self.params_defualt = {'learning_rate': 'constant',\r\n                                 'learning_rate_init': 0.001,\r\n                                 'batch_size': 4,\r\n                                 'hidden_layer_sizes': (20, 32),\r\n                                 'random_state': 1,\r\n                                 'max_iter': 100000,\r\n                                 'activation': 'logistic'}\r\n        self.tuned_parameters = {'learning_rate':['constant'],\r\n                               'learning_rate_init':[0.001],\r\n                               'batch_size':[4],\r\n                                'hidden_layer_sizes':[(20, 32)],\r\n                                'random_state':[1],\r\n                                'max_iter':[100000],\r\n                                'activation':['logistic']}\r\n\r\n    def auto_tune_params(self, x_train, y_train):\r\n        #use RMSE as the scoring\r\n        clf = GridSearchCV(\r\n            mlp(), self.tuned_parameters, scoring='neg_root_mean_squared_error'\r\n        )\r\n        clf.fit(x_train, y_train)\r\n\r\n        print(\"Best parameters set found on development set:\")\r\n        print()\r\n        print(clf.best_params_)\r\n        self.params_defualt = 
clf.best_params_\r\n\r\n    def modify_params(self, params):\r\n        for key in params:\r\n            self.params_defualt[key] = params[key]\r\n\r\n    def build_model(self):\r\n        self.model = mlp(**self.params_defualt)\r\n\r\n    def model_evaluate(self, x, y, cv):\r\n        scoring = ['neg_root_mean_squared_error', 'r2']\r\n        scores = cross_validate(self.model, x, y, scoring=scoring, cv=cv, return_train_score=True,\r\n                                return_estimator=True)\r\n        self.estimator = scores['estimator']\r\n        return -scores['train_neg_root_mean_squared_error'].mean(), scores['train_r2'].mean(), -scores[\r\n            'test_neg_root_mean_squared_error'].mean(), scores['test_r2'].mean(), scores['estimator']\r\n\r\n    def calculate(self, x_train, x_test, y_train, y_test):\r\n        self.model.fit(x_train, y_train)\r\n        rmse = np.sqrt(mse(y_train, self.model.predict(x_train)))\r\n        r2 = r2_score(y_train, self.model.predict(x_train))\r\n        rmset = np.sqrt(mse(y_test, self.model.predict(x_test)))\r\n        r2t = r2_score(y_test, self.model.predict(x_test))\r\n        print('pre:', self.model.predict(x_test))\r\n        print(y_test)\r\n        print(rmse)\r\n        print(r2)\r\n        print(rmset)\r\n        print(r2t)\r\n        return r2\r\n"
  },
  {
    "path": "Models/RFR.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport numpy as np\r\nfrom sklearn.ensemble import RandomForestRegressor as rfr\r\nfrom sklearn.metrics import mean_squared_error as mse\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.model_selection import train_test_split,cross_validate,GridSearchCV\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nclass RFR(object):\r\n    def __init__(self):\r\n        self.params_defualt = {'n_estimators': 500,\r\n                                 'max_depth': None,\r\n                                 'min_samples_split': 2,\r\n                                 'min_samples_leaf': 1,\r\n                                 'max_features': 'auto',\r\n                                 'max_leaf_nodes': None,\r\n                                 'warm_start': False,\r\n                                 'verbose': 0,\r\n                                 'ccp_alpha': 0.0,\r\n                                 'max_samples': None}\r\n        self.tuned_parameters = {'n_estimators': [500],\r\n                                #'criterion': [mse],\r\n                                'max_depth': [None],\r\n                                'min_samples_split': [2],\r\n                                'min_samples_leaf': [1],\r\n                                'max_features' : ['auto'],\r\n                                'max_leaf_nodes': [None],\r\n                                'warm_start': [False],\r\n                                'verbose': [0],\r\n                                'ccp_alpha': [0.0],\r\n                                'max_samples': [None]}\r\n\r\n    def auto_tune_params(self, x_train, y_train):\r\n   
     # use RMSE as the scoring\r\n        clf = GridSearchCV(\r\n            rfr(), self.tuned_parameters, scoring='neg_root_mean_squared_error'\r\n        )\r\n        clf.fit(x_train, y_train)\r\n\r\n        print(\"Best parameters set found on development set:\")\r\n        print()\r\n        print(clf.best_params_)\r\n        self.params_defualt = clf.best_params_\r\n\r\n    def modify_params(self, params):\r\n        for key in params:\r\n            self.params_defualt[key] = params[key]\r\n\r\n    def build_model(self):\r\n        self.model = rfr(**self.params_defualt)\r\n\r\n    def model_evaluate(self, x, y, cv):\r\n        scoring = ['neg_root_mean_squared_error', 'r2']\r\n        scores = cross_validate(self.model, x, y, scoring=scoring, cv=cv, return_train_score=True,\r\n                                return_estimator=True)\r\n        self.estimator = scores['estimator']\r\n        return -scores['train_neg_root_mean_squared_error'].mean(), scores['train_r2'].mean(), -scores[\r\n            'test_neg_root_mean_squared_error'].mean(), scores['test_r2'].mean(), scores['estimator']\r\n\r\n    def calculate(self, x_train, x_test, y_train, y_test):\r\n        self.model.fit(x_train, y_train)\r\n        rmse = np.sqrt(mse(y_train, self.model.predict(x_train)))\r\n        r2 = r2_score(y_train, self.model.predict(x_train))\r\n        rmset = np.sqrt(mse(y_test, self.model.predict(x_test)))\r\n        r2t = r2_score(y_test, self.model.predict(x_test))\r\n        print('pre:', self.model.predict(x_test))\r\n        print(y_test)\r\n        print(rmse)\r\n        print(r2)\r\n        print(rmset)\r\n        print(r2t)\r\n        print(self.model.feature_importances_)\r\n        return r2\r\n"
  },
  {
    "path": "Models/SVR.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport numpy as np\r\nfrom sklearn.svm import SVR as svr\r\nfrom sklearn.metrics import mean_squared_error as mse\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.model_selection import train_test_split,cross_validate,GridSearchCV\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nclass SVR(object):\r\n    def __init__(self):\r\n        self.params_defualt = {}\r\n        self.tuned_parameters = {}\r\n\r\n    def auto_tune_params(self, x_train, y_train):\r\n        #use RMSE as the scoring\r\n        clf = GridSearchCV(\r\n            svr(), self.tuned_parameters, scoring='neg_root_mean_squared_error'\r\n        )\r\n        clf.fit(x_train, y_train)\r\n\r\n        print(\"Best parameters set found on development set:\")\r\n        print()\r\n        print(clf.best_params_)\r\n        self.params_defualt = clf.best_params_\r\n\r\n    def modify_params(self, params):\r\n        for key in params:\r\n            self.params_defualt[key] = params[key]\r\n\r\n    def build_model(self):\r\n        self.model = svr(**self.params_defualt)\r\n\r\n    def model_evaluate(self, x, y, cv):\r\n        scoring = ['neg_root_mean_squared_error', 'r2']\r\n        scores = cross_validate(self.model, x, y, scoring=scoring, cv=cv, return_train_score=True,\r\n                                return_estimator=True)\r\n        self.estimator = scores['estimator']\r\n        return -scores['train_neg_root_mean_squared_error'].mean(), scores['train_r2'].mean(), -scores[\r\n            'test_neg_root_mean_squared_error'].mean(), scores['test_r2'].mean(), scores['estimator']\r\n\r\n    def calculate(self, x_train, 
x_test, y_train, y_test):\r\n        self.model.fit(x_train, y_train)\r\n        rmse = np.sqrt(mse(y_train, self.model.predict(x_train)))\r\n        r2 = r2_score(y_train, self.model.predict(x_train))\r\n        rmset = np.sqrt(mse(y_test, self.model.predict(x_test)))\r\n        r2t = r2_score(y_test, self.model.predict(x_test))\r\n        print('pre:', self.model.predict(x_test))\r\n        print(y_test)\r\n        print(rmse)\r\n        print(r2)\r\n        print(rmset)\r\n        print(r2t)\r\n        return r2\r\n"
  },
  {
    "path": "README.md",
    "content": "# DMCP:DFT-based Machine learning method for Captureing Property Relationship with Structures\nDMCP is aimed to implement DFT-based and Machine-learning-accelerated (DFT-ML) scheme for captureing QSPR in intricate system . It is possible to predict the property of intricate system  such as HEAs and to reveal the intrinsic descriptors which determine the underlying property of them with appropriate algorithm and train data features.\n# Developer:\nDMCP is developed within Prof. Yuzheng Guo's group in Wuhan University, in colloboration with Prof. John Robertson's group in Cambridge University.\nCore developer: Xuhao Wan, Yuzheng Guo\nEmail: xhwanrm@whu.edu.cn, yguo@whu.edu.cn\n# Major Features\n1. Ten machine learning algorithms: GBR, KNR, SVR, GPR, FNN, RFR, ETR, KRR, LASSO, and ENR.\n2. Multiple methods to improve model accuracy: dataset split, cross validation, repeated trails. \n3. Visualization module for research.\n# Prerequisites\n1. Generally, you need some data obtained from DFT calculations such as VASP, QE, and CP2K or available material database.\n2. DMCP requires Python 3 with the packages specified in requirements.txt. This is taken care of by pip.\n# Citation\nIf you use DMCP in your research, please cite the following paper:\n1. X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. doi.org/10.1016/j.matre.2021.100046.\n# Reference\nThe work applied DMCP are listed as following:\n1. Dou B, Zhu Z, Merkurjev E, et al. Machine learning methods for small data challenges in molecular science. Chemical Reviews, 2023, 123(13): 8736-8780.\n2. Tamtaji M, Gao H, Hossain M D, et al. Machine learning for design principles for single atom catalysts towards electrochemical reactions. Journal of Materials Chemistry A, 2022, 10(29): 15309-15331.\n3. Liu X, Zhang Y, Wang W, et al. 
Transition metal and N doping on AlP monolayers for bifunctional oxygen electrocatalysts: density functional theory study assisted by machine learning description. ACS Applied Materials & Interfaces, 2021, 14(1): 1249-1259.\n4. Huang Y, Rehman F, Tamtaji M, et al. Mechanistic understanding and design of non-noble metal-based single-atom catalysts supported on two-dimensional materials for CO2 electroreduction. Journal of Materials Chemistry A, 2022, 10(11): 5813-5834.\n5. Liu T, Zhao X, Liu X, et al. Understanding the hydrogen evolution reaction activity of doped single-atom catalysts on two-dimensional GaPS4 by DFT and machine learning. Journal of Energy Chemistry, 2023, 81: 93-100.\n6. X. Wan, Z. Zhang*, H. Niu, Y. Yin, C. Kuai, J. Wang, C. Shao, Y. Guo*, Machine-Learning-Accelerated Catalytic Activity Predictions of Transition Metal Phthalocyanine Dual-Metal-Sites Catalysts for CO2 Reduction. The Journal of Physical Chemistry Letters, 2021.\n7. H. Niu#, X. Wan#, X. Wang, C. Chen, J. Robertson, Z. Zhang*, Y. Guo*, Single-Atom Rhodium on Defective g-C3N4: A Promising Bifunctional Oxygen Electrocatalyst. ACS Sustainable Chem. Eng., 9(9), 3590-3599, 2021.\n8. Wan X, Yu W, Niu H, et al. Revealing the Oxygen Reduction/Evolution Reaction Activity Origin of Carbon-Nitride-Related Single-Atom Catalysts: Quantum Chemistry in Artificial Intelligence. Chemical Engineering Journal. 2022, 440: 135946.\n9. Khrabrov K, Shenbin I, Ryabov A, et al. nablaDFT: Large-Scale Conformational Energy and Hamiltonian Prediction benchmark and dataset. Physical Chemistry Chemical Physics, 2022, 24(42): 25853-25863.\n10. Pant D, Pokharel S, Mandal S, et al. DFT-aided machine learning-based discovery of magnetism in Fe-based bimetallic chalcogenides. 
Scientific Reports, 2023, 13(1): 3277.\n\n# Tips\nWelcome to join the DMCP exchange Wechat group.\n欢迎加入DMCP微信交流群。\n\n![9eb7685d4afb615aaf0f70843d8895c](https://user-images.githubusercontent.com/73831094/146893430-46b61a00-f54d-423f-98b6-a8413216c8d5.jpg)\n\nIf it is invalid, please contact us by Email.\n"
  },
  {
    "path": "Visualization/Violin.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\nclass plot_Violin(object):\r\n    def __init__(self, title, data_dict):\r\n        model_list = []\r\n        data_list = []\r\n        #for key in data_dict.keys():\r\n            #model_list.append(key)\r\n            #data_list.append(data_dict[key])\r\n        tips = pd.DataFrame.from_dict(data_dict)\r\n        #tips = np.array(data_list).T\r\n        sns.set(style='ticks', font='Times New Roman', font_scale=1.8)\r\n        fig = plt.figure(figsize=(9, 6))\r\n        ax=sns.violinplot(data=tips,\r\n                       split=True,\r\n                       linewidth = 2, #线宽\r\n                       width = 0.8,   #箱之间的间隔比例\r\n                       palette = 'pastel', #设置调色板\r\n                       #order = model_list, #筛选类别\r\n                       # scale = 'count',  #测度小提琴图的宽度： area-面积相同,count-按照样本数量决定宽度,width-宽度一样\r\n                       gridsize = 50, #设置小提琴图的平滑度，越高越平滑\r\n                       # inner = 'box', #设置内部显示类型 --> 'box','quartile','point','stick',None\r\n                       #bw = 0.8      #控制拟合程度，一般可以不设置\r\n                       )\r\n        ax.set_ylabel(title + 'Score', fontsize=28, fontfamily='Times New Roman')\r\n        ax.set_xlabel('model', fontsize=28, fontfamily='Times New Roman')\r\n        #ax = fig.add_subplot(111)\r\n\r\n        fig.savefig('volin_' + title + '.jpg', bbox_inches='tight')\r\n        plt.show()\r\n\r\n#data_dict = {'GBR': [-0.32744250093837, -0.3958306749566164], 'KNR': [-0.3522280594983168, -0.3593963196775159], 'SVR': 
[-0.34374401871141413, -0.34763844616449313], 'GPR': [-0.3802567562102421, -0.3864921408452695], 'MLP': [-0.3912175478421521, -0.39099797659443214], 'RFR': [-0.34453840142807285, -0.3616486858237452], 'ETR': [-0.41477900933292655, -0.49834959923665945], 'KRR': [-0.4050557476038392, -0.41178328305680595], 'LSO': [-0.3890709246795953, -0.3890709246795953], 'ENR': [-0.3890709246795953, -0.3890709246795953]}\r\n#data_show = plot_Violin(data_dict)\r\n"
  },
  {
    "path": "Visualization/bar.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom statistics import mean\r\n\r\nclass plot_bar(object):\r\n    def __init__(self, title, train_set_RMSE, train_set_R2, test_set_RMSE, test_set_R2):\r\n        data_list2 = []\r\n        for item in [train_set_RMSE, train_set_R2, test_set_RMSE, test_set_R2]:\r\n            data_list1 = []\r\n            model_list = []\r\n            for i in item.keys():\r\n                data_list1.append(mean(item[i]))\r\n                model_list.append(i)\r\n            data_list2.append(data_list1)\r\n        font={'family':'Times New Roman', 'weight':'normal', 'size':20}\r\n        ##取出 4个dict中的数据，注意dict中的数据存放是无序的\r\n        R2train = data_list2[1]  # train set R2 score\r\n        R2test = data_list2[3]# test set R2 score\r\n        RMSEtrain = data_list2[0]  # train set RMSE\r\n        RMSEtest = data_list2[2] # test set RMSE\r\n        #for item in [R2train, R2test, RMSEtrain, RMSEtest]:\r\n            #for i in range(len(item)):\r\n                #if item[i] < 0:\r\n                    #item[i] = 0.5\r\n        label = model_list\r\n        bar_width = 0.4\r\n        bar_x = np.arange(len(label))\r\n\r\n\r\n        fig1 = plt.figure(figsize=(9, 6))\r\n        ax1 = fig1.add_subplot(111)\r\n        #ax1.set_title('RMSE')\r\n        bar1 = ax1.bar(x=bar_x - bar_width/2,   # 设置不同的x起始位置\r\n                      height= RMSEtrain, width=bar_width, color='royalblue')\r\n        bar2 = ax1.bar(x=bar_x + bar_width/2,   # 设置不同的x起始位置\r\n                      height= RMSEtest, width=bar_width, color='darkorange'\r\n                
)\r\n\r\n        ax1.set_ylabel('RMSE /eV', fontsize=24, fontfamily='Times New Roman')\r\n        ax1.set_xticks(range(len(label)))\r\n        ax1.set_xticklabels(label, fontsize=20, fontfamily='Times New Roman')\r\n        #ax1.set_yticklabels(np.around((np.arange(0, 0.4, 0.05)), decimals=2), fontsize=20, fontfamily='Times New Roman')\r\n        ax1.legend((bar1, bar2), ('Train set', 'Test set'), prop=font)\r\n\r\n        fig2 = plt.figure(figsize=(9, 6))\r\n        ax2 = fig2.add_subplot(111)\r\n        #ax1.set_title('RMSE')\r\n        bar1 = ax2.bar(x=bar_x - bar_width/2,   # 设置不同的x起始位置\r\n                      height= R2train, width=bar_width, color='royalblue')\r\n        bar2 = ax2.bar(x=bar_x + bar_width/2,   # 设置不同的x起始位置\r\n                      height= R2test, width=bar_width, color='darkorange'\r\n                )\r\n\r\n        ax2.set_ylabel('Score', fontsize=24, fontfamily='Times New Roman')\r\n        ax2.set_xticks(range(len(label)))\r\n        ax2.set_xticklabels(label, fontsize=20, fontfamily='Times New Roman')\r\n        #ax2.set_yticklabels(np.around((np.arange(0, 1.0, 0.2)), decimals=2), fontsize=20, fontfamily='Times New Roman')\r\n        ax2.legend((bar1, bar2), ('Train set', 'Test set'), prop=font)\r\n\r\n        fig1.savefig('bar_RMSE.jpg')\r\n        fig2.savefig('bar2_R2.jpg')\r\n        plt.show()\r\n"
  },
  {
    "path": "Visualization/pearson.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport numpy as np\r\nfrom sklearn.ensemble import GradientBoostingRegressor as GBR\r\nfrom sklearn.metrics import mean_squared_error as mse\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom scipy.stats import pearsonr\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\nclass plot_pearson(object):\r\n    def __init__(self, optimal_model_name,corelation):\r\n        pearson_r2 = []\r\n        for i in range(corelation.shape[1]):\r\n            pearson_r1 = []\r\n            for j in range(corelation.shape[1]):\r\n                r, _ = pearsonr(corelation[:][i], corelation[:][j])\r\n                pearson_r1.append(r)\r\n            pearson_r2.append(pearson_r1)\r\n\r\n        ax1 = sns.heatmap(pearson_r2, vmin=-1, vmax=1, cmap='RdBu')\r\n        plt.title(optimal_model_name)\r\n        plt.savefig('Pcdac_pearson.jpg')\r\n        plt.show()\r\n\r\n\r\n"
  },
  {
    "path": "Visualization/pie.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nclass plot_pie(object):\r\n    def __init__(self, optimal_model_name, feature_importance):\r\n        font={'family':'Times New Roman', 'weight':'normal', 'size': 18}\r\n        cmap = plt.get_cmap(\"tab20\")\r\n        colors = cmap(np.arange(len(feature_importance)))\r\n\r\n        labels = ['e_dp1', 'H_f.ox1', 'N_m1', 'χ_1', 'I_m1', 'r_1', 'N_d1', 'Q_1', 'ΔG_COOH*1', 'ΔG_Max1', 'e_dp2','H_f.ox2', 'N_m2', 'χ_2', 'I_m2', 'r_2', 'N_d2', 'Q_2', 'ΔG_COOH*2', 'ΔG_Max2']\r\n        #for i in range(1,len(feature_importance) + 1):\r\n            #labels.append(str('x')+str(i))\r\n\r\n\r\n        fig = plt.figure(figsize=(9, 6))\r\n        ax = fig.add_subplot(111)\r\n\r\n        wedges, text = ax.pie(feature_importance, colors=colors, shadow=True,\r\n                     startangle=90, textprops=font)\r\n        ax.legend(wedges, labels, bbox_to_anchor=(1, 0, 0, 1), fontsize=8)\r\n        plt.title(optimal_model_name)\r\n        fig.savefig('Pcdac_fipie.jpg')\r\n        plt.show()\r\n"
  },
  {
    "path": "Visualization/scatter.py",
    "content": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#author: xuhao wan, wei yu\r\n#If you use DMCP in your research, please cite the following paper:X. Wan, Z. Zhang*, W. Yu, Y. Guo*, A State-of-the-art Density-functional-theory-based and Machine-learning-accelerated Hybrid Method for Intricate System Catalysis. Materials Reports: Energy. 2021.\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nclass plot_scatter(object):\r\n    def __init__(self, y_train_true, y_train_pred, y_test_true, y_test_pred):\r\n        font={'family':'Times New Roman', 'weight':'normal', 'size': 24}\r\n        fig = plt.figure(figsize=(9, 6))\r\n        ax = fig.add_subplot(111)\r\n\r\n        dot1 = ax.scatter(y_train_true, y_train_pred,\r\n                   s=80, c='white', edgecolors='royalblue', marker='o', linewidth=2)\r\n        dot2 = ax.scatter(y_test_true, y_test_pred,\r\n                   s=80, c='white', edgecolors='darkorange', marker='s', linewidth=2)\r\n        line = ax.plot([0,1,2.2], [0,1,2.2], color='k')\r\n\r\n        ax.set_xlabel('$\\mathregular{G_{DFT}}$ /eV', fontsize=24, fontfamily='Times New Roman')\r\n        ax.set_ylabel('$\\mathregular{G_{ML}}$ /eV' , fontsize=24, fontfamily='Times New Roman')\r\n        ax.set_xlim(xmin=0, xmax=2.2)\r\n        ax.set_ylim(ymin=0, ymax=2.2)\r\n        ax.set_xticklabels(np.around((np.arange(0, 2.2, 0.25)), decimals=2), fontsize=20, fontfamily='Times New Roman')\r\n        ax.set_yticklabels(np.around((np.arange(0, 2.2, 0.25)), decimals=2), fontsize=20, fontfamily='Times New Roman')\r\n        ax.legend((dot1, dot2), ('Train set', 'Test set'), prop=font)\r\n\r\n        fig.savefig('Pcdac_scatter.jpg', bbox_inches='tight')\r\n        plt.show()\r\n"
  },
  {
    "path": "manual",
    "content": "The keywords INTRN and OUTDAT are the filename of the input and output data files, respectively. \nOTFIG is the filename prefix of the visualization results generated by DMCP and the format of these figures is optional, including jpg, png, and pdf.\nThe keyword PSCAL controls the feature scaling: OFF, NOR, STA, and REG means no data scaling, normalization, standardization, and regularization processing, respectively. \nThe noise processing is controlled by the keyword PNOSE, and its value determines the distribution range of noises while 0 means the noise processing is not employed. \nOriginal data is reproduced with randomly distributed noises in the scale of -x to x (x is the values of the keyword PNOSE). The keyword PSPLT controls the dataset split and its values are the percentage of the training dataset. The keyword GCRVA controls whether the cross-validation is employed (ON or OFF) while the value of the keyword GREPT is the number of repeated trials. \nWhen the cross-validation and repeated trails are applied together, the value of GREPT is the repeat times of the training procedure in each dataset split and the value of the keyword GCVRN is the number of the rounds of cross-validation.\nThe selected algorithm is determined by the keyword GMODL: the corresponding GMODL values of are GBR (for Gradient Boosted Regression), KNR (k-Neighbor Regression), SVR (Support Vector Regression), GPR (Gaussian Process Regression), FNN (Feedforward Neural Network), RFR (Random Forest Regression), ETR (Extra Trees Regression), KRR (Kernel Ridge Regression), LASSO (Least Absolute Shrinkage and Selection Operator Regression) and ENR (Elastic Net Regression). 
Several algorithms can be selected at the same time to establish different machine learning models by simply enumerating the corresponding values of GMODL.\nThe model parameters can be provided by the keyword PRX where X represents the abbreviations (also the values of GMODL) of the algorithms.\nThe keyword VVOLN in the visualization module controls the drawing of the violin plot. \nThe keyword VCOMP is related to the histogram. The keywords that control the switch of the scatter plot and the pie chart are respectively VSCAM and VFTIM and their values are the selected machine learning model which is usually the best performing model. \nThe keyword VPRAS controls whether the Pearson correlation map is drawn.\nTo predict the catalytic performance, the corresponding feature values should be generated and transported into the model at first, which is controlled by the keyword INPRE and its value is the filename of the input data used for prediction. \nThe keyword GPREM determines the model used in the prediction process which is usually the best performing model and it is also the switch of the prediction function in DMCP.\n"
  },
  {
    "path": "requirements.txt",
    "content": "#The DMCP Program can be run in either Linux or Windows operation systems utilizing command lines. \n#Please install the following compiler in your operation system to run DMCP.\nnumpy>=1.20.1\nscipy\ntorch\nsklearn\npandas\nmatplotlib\nf90nml\nmultidict \nstatistics\npytest>=4.6\n"
  },
  {
    "path": "setup.py",
    "content": "import setuptools\n\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nwith open(\"requirements.txt\", \"r\") as fh:\n    dependencies = fh.readlines()\n\nsetuptools.setup(\n    name=\"DMCP\",\n    packages=setuptools.find_packages(exclude=[\"tests\"]),\n    version=\"0.1.2\",\n    author=\"Xuhao Wan\",\n    author_email=\"xhwanrm@whu.edu.cn\",\n    description=\"DMCP\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/XuhaoWan/DMCP\",\n    python_requires=\">=3.7\",\n    install_requires=dependencies,\n    license=\"GNU\",\n    classifiers=[\n        \"License :: OSI Approved :: GNU  License\",\n        \"Topic :: Scientific/Engineering :: Physics\",\n        \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n        \"Development Status :: 4 - Beta\",\n    ],\n)\n"
  }
]