Full Code of COLA-Laboratory/TransOPT for AI

main de8bf397d59b cached
419 files
1.8 MB
460.3k tokens
2302 symbols
1 requests
Download .txt
Showing preview only (1,971K chars total). Download the full file or copy to clipboard to get everything.
Repository: COLA-Laboratory/TransOPT
Branch: main
Commit: de8bf397d59b
Files: 419
Total size: 1.8 MB

Directory structure:
gitextract__y7m6kto/

├── .gitignore
├── LICENSE
├── MANIFEST.in
├── README.md
├── demo/
│   ├── analysis.py
│   ├── causal_analysis.py
│   ├── comparison/
│   │   ├── analysis_hypervolume.py
│   │   ├── analysis_plot.py
│   │   ├── experiment_gcc.py
│   │   ├── experiment_llvm.py
│   │   ├── features_by_workload_gcc.json
│   │   ├── features_by_workload_gcc_extra.json
│   │   ├── features_by_workload_llvm.json
│   │   ├── plot.py
│   │   ├── plot_samples_dbms.py
│   │   └── start_server.py
│   ├── correlation_analysis.py
│   ├── experiment_lsh_validity.py
│   ├── experiments.py
│   ├── importances/
│   │   ├── cal_relationship.py
│   │   ├── draw_obj_heatmap.py
│   │   └── get_feature_importances.py
│   ├── jacard_exec_times.csv
│   ├── lsh_exec_times.csv
│   ├── optimize_profile.prof
│   ├── random_sample_compiler.py
│   ├── random_sample_dbms.py
│   └── sampling/
│       ├── random_sample_compiler.py
│       └── random_sample_dbms.py
├── docs/
│   ├── Makefile
│   ├── make.bat
│   └── source/
│       ├── _static/
│       │   └── custom.css
│       ├── conf.py
│       ├── development/
│       │   ├── api_reference.rst
│       │   └── architecture.rst
│       ├── faq.rst
│       ├── home/
│       │   ├── feature.html
│       │   ├── guide.html
│       │   └── portfolio.html
│       ├── index.rst
│       ├── installation.rst
│       ├── quickstart.rst
│       └── usage/
│           ├── TOS.bib
│           ├── algorithms.rst
│           ├── cli.rst
│           ├── data_manage.rst
│           ├── problems.rst
│           ├── results.rst
│           └── visualization.rst
├── extra_requirements/
│   ├── analysis.json
│   └── remote.json
├── requirements.txt
├── resources/
│   └── docker/
│       └── absolut_image/
│           ├── Dockerfile
│           └── prepare_antigen.sh
├── scripts/
│   ├── init_csstuning.sh
│   └── init_docker.sh
├── setup.py
├── tests/
│   ├── EXP_NSGA2.py
│   ├── EXP_NSGA2_restart.py
│   ├── EXP_bohb.py
│   ├── EXP_grid.py
│   ├── EXP_hebo.py
│   ├── EXP_hyperopt.py
│   ├── EXP_random.py
│   ├── EXP_smac.py
│   ├── EXP_tpe.py
│   └── data_analysis.py
├── transopt/
│   ├── ResultAnalysis/
│   │   ├── AnalysisBase.py
│   │   ├── AnalysisPipeline.py
│   │   ├── AnalysisReport.py
│   │   ├── CasualAnalysis.py
│   │   ├── CompileTex.py
│   │   ├── CorrelationAnalysis.py
│   │   ├── MakeGif.py
│   │   ├── PFAnalysis.py
│   │   ├── PlotAnalysis.py
│   │   ├── ReportNote.py
│   │   ├── TableAnalysis.py
│   │   ├── TableToLatex.py
│   │   ├── TrackOptimization.py
│   │   └── __init__.py
│   ├── __init__.py
│   ├── agent/
│   │   ├── __init__.py
│   │   ├── app.py
│   │   ├── chat/
│   │   │   ├── openai_chat.py
│   │   │   ├── prompt
│   │   │   ├── prompt.bak
│   │   │   └── yaml_generator.py
│   │   ├── config.py
│   │   ├── registry.py
│   │   ├── run_cli.py
│   │   ├── services.py
│   │   └── testood.py
│   ├── analysis/
│   │   ├── compile_tex.py
│   │   ├── effect_size.py
│   │   ├── mds.py
│   │   ├── parameter_network.py
│   │   ├── table.py
│   │   └── table_to_latex.py
│   ├── benchmark/
│   │   ├── CPD/
│   │   │   └── __init__.py
│   │   ├── CSSTuning/
│   │   │   ├── Compiler.py
│   │   │   ├── DBMS.py
│   │   │   └── __init__.py
│   │   ├── DownloadBench/
│   │   │   └── references
│   │   ├── HBOROB/
│   │   │   ├── algorithms.py
│   │   │   ├── hporobust.py
│   │   │   └── test.py
│   │   ├── HPO/
│   │   │   ├── HPO.py
│   │   │   ├── HPOAdaBoost.py
│   │   │   ├── HPOSVM.py
│   │   │   ├── HPOXGBoost.py
│   │   │   ├── __init__.py
│   │   │   ├── algorithms.py
│   │   │   ├── augmentation.py
│   │   │   ├── datasets.py
│   │   │   ├── fast_data_loader.py
│   │   │   ├── hparams_registry.py
│   │   │   ├── image_options.py
│   │   │   ├── misc.py
│   │   │   ├── networks.py
│   │   │   ├── test_model.py
│   │   │   ├── visualization.py
│   │   │   └── wide_resnet.py
│   │   ├── HPOB/
│   │   │   ├── HpobBench.py
│   │   │   └── plot.py
│   │   ├── HPOOOD/
│   │   │   ├── algorithms.py
│   │   │   ├── collect_results.py
│   │   │   ├── download.py
│   │   │   ├── fast_data_loader.py
│   │   │   ├── hparams_registry.py
│   │   │   ├── hpoood.py
│   │   │   ├── misc.py
│   │   │   ├── networks.py
│   │   │   ├── ooddatasets.py
│   │   │   └── wide_resnet.py
│   │   ├── RL/
│   │   │   ├── LunarlanderBenchmark.py
│   │   │   └── __init__.py
│   │   ├── __init__.py
│   │   ├── instantiate_problems.py
│   │   ├── problem_base/
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   ├── non_tab_problem.py
│   │   │   ├── tab_problem.py
│   │   │   └── transfer_problem.py
│   │   └── synthetic/
│   │       ├── MovingPeakBenchmark.py
│   │       ├── MultiObjBenchmark.py
│   │       ├── __init__.py
│   │       └── synthetic_problems.py
│   ├── datamanager/
│   │   ├── __init__.py
│   │   ├── database.py
│   │   ├── lsh.py
│   │   ├── manager.py
│   │   └── minhash.py
│   ├── optimizer/
│   │   ├── MultiObjOptimizer/
│   │   │   ├── CauMOpt.py
│   │   │   ├── IEIPV.py
│   │   │   ├── MoeadEGO.py
│   │   │   ├── ParEGO.py
│   │   │   ├── SMSEGO.py
│   │   │   └── __init__.py
│   │   ├── SingleObjOptimizer/
│   │   │   ├── KrigingOptimizer.py
│   │   │   ├── LFL.py
│   │   │   ├── MetaLearningOptimizer.py
│   │   │   ├── MultitaskOptimizer.py
│   │   │   ├── PROptimizer.py
│   │   │   ├── RBFNOptimizer.py
│   │   │   ├── RGPEOptimizer.py
│   │   │   ├── TPEOptimizer.py
│   │   │   ├── VizerOptimizer.py
│   │   │   └── __init__.py
│   │   ├── __init__.py
│   │   ├── acquisition_function/
│   │   │   ├── ConformalLCB.py
│   │   │   ├── __init__.py
│   │   │   ├── acf_base.py
│   │   │   ├── ei.py
│   │   │   ├── get_acf.py
│   │   │   ├── lcb.py
│   │   │   ├── model_manage/
│   │   │   │   ├── CMAESBest.py
│   │   │   │   ├── CMAESGeneration.py
│   │   │   │   ├── CMAESPreSelect.py
│   │   │   │   ├── DEBest.py
│   │   │   │   ├── DEGeneration.py
│   │   │   │   ├── DEPreSelect.py
│   │   │   │   ├── GABest.py
│   │   │   │   ├── GAGeneration.py
│   │   │   │   ├── GAPreSelect.py
│   │   │   │   ├── PSOBest.py
│   │   │   │   ├── PSOGeneration.py
│   │   │   │   └── PSOPreSelect.py
│   │   │   ├── moeadego.py
│   │   │   ├── pi.py
│   │   │   ├── piei.py
│   │   │   ├── sequential.py
│   │   │   ├── smsego.py
│   │   │   └── taf.py
│   │   ├── construct_optimizer.py
│   │   ├── model/
│   │   │   ├── HyperBO.py
│   │   │   ├── __init__.py
│   │   │   ├── bohb.py
│   │   │   ├── deepkernel.py
│   │   │   ├── dyhpo.py
│   │   │   ├── get_model.py
│   │   │   ├── gp.py
│   │   │   ├── hebo.py
│   │   │   ├── mhgp.py
│   │   │   ├── mlp.py
│   │   │   ├── model_base.py
│   │   │   ├── moeadego.py
│   │   │   ├── mtgp.py
│   │   │   ├── neuralprocess.py
│   │   │   ├── parego.py
│   │   │   ├── pr.py
│   │   │   ├── rbfn.py
│   │   │   ├── rf.py
│   │   │   ├── rgpe.py
│   │   │   ├── sgpt.py
│   │   │   ├── smsego.py
│   │   │   └── utils.py
│   │   ├── normalizer/
│   │   │   ├── __init__.py
│   │   │   ├── normalizer_base.py
│   │   │   └── standerd.py
│   │   ├── optimizer_base/
│   │   │   ├── EvoOptimizerBase.py
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   └── bo.py
│   │   ├── pretrain/
│   │   │   ├── __init__.py
│   │   │   ├── deepkernelpretrain.py
│   │   │   ├── get_pretrain.py
│   │   │   ├── hyper_bo.py
│   │   │   └── pretrain_base.py
│   │   ├── refiner/
│   │   │   ├── __init__.py
│   │   │   ├── box.py
│   │   │   ├── ellipse.py
│   │   │   ├── get_refiner.py
│   │   │   ├── prune.py
│   │   │   └── refiner_base.py
│   │   ├── sampler/
│   │   │   ├── __init__.py
│   │   │   ├── get_sampler.py
│   │   │   ├── gradient.py
│   │   │   ├── grid.py
│   │   │   ├── lhs.py
│   │   │   ├── lhs_BAK.py
│   │   │   ├── meta.py
│   │   │   ├── random.py
│   │   │   ├── sampler_base.py
│   │   │   └── sobel.py
│   │   └── selector/
│   │       ├── __init__.py
│   │       ├── fuzzy_selector.py
│   │       ├── lsh_selector.py
│   │       └── selector_base.py
│   ├── remote/
│   │   ├── __init__.py
│   │   ├── celeryconfig.py
│   │   ├── experiment_client.py
│   │   ├── experiment_server.py
│   │   ├── experiment_tasks.py
│   │   └── server_manager.sh
│   ├── space/
│   │   ├── __init__.py
│   │   ├── fidelity_space.py
│   │   ├── search_space.py
│   │   └── variable.py
│   └── utils/
│       ├── Initialization.py
│       ├── Kernel.py
│       ├── Normalization.py
│       ├── Prior.py
│       ├── Read.py
│       ├── Visualization.py
│       ├── __init__.py
│       ├── check.py
│       ├── encoding.py
│       ├── hypervolume.py
│       ├── log.py
│       ├── openml_data_manager.py
│       ├── pareto.py
│       ├── path.py
│       ├── plot.py
│       ├── profile.py
│       ├── rng_helper.py
│       ├── serialization.py
│       ├── sk.py
│       └── weights.py
└── webui/
    ├── .gitignore
    ├── LICENSE.md
    ├── package.json
    ├── public/
    │   ├── index.html
    │   ├── manifest.json
    │   ├── robots.txt
    │   └── transopt.psd
    ├── src/
    │   ├── App.css
    │   ├── App.js
    │   ├── App.test.js
    │   ├── app/
    │   │   ├── auth.js
    │   │   ├── init.js
    │   │   └── store.js
    │   ├── components/
    │   │   ├── CalendarView/
    │   │   │   ├── index.js
    │   │   │   └── util.js
    │   │   ├── Cards/
    │   │   │   └── TitleCard.js
    │   │   ├── Input/
    │   │   │   ├── InputText.js
    │   │   │   ├── SearchBar.js
    │   │   │   ├── SelectBox.js
    │   │   │   ├── TextAreaInput.js
    │   │   │   └── ToogleInput.js
    │   │   └── Typography/
    │   │       ├── ErrorText.js
    │   │       ├── HelperText.js
    │   │       ├── Subtitle.js
    │   │       └── Title.js
    │   ├── containers/
    │   │   ├── Header.js
    │   │   ├── Layout.js
    │   │   ├── LeftSidebar.js
    │   │   ├── ModalLayout.js
    │   │   ├── PageContent.js
    │   │   ├── RightSidebar.js
    │   │   ├── SidebarSubmenu.js
    │   │   └── SuspenseContent.js
    │   ├── features/
    │   │   ├── algorithm/
    │   │   │   ├── components/
    │   │   │   │   ├── OptTable.js
    │   │   │   │   └── SelectPlugin.js
    │   │   │   └── index.js
    │   │   ├── analytics/
    │   │   │   ├── charts/
    │   │   │   │   ├── Box.js
    │   │   │   │   ├── Trajectory.js
    │   │   │   │   └── my_theme.json
    │   │   │   ├── components/
    │   │   │   │   ├── LineChart.js
    │   │   │   │   └── SelectTask.js
    │   │   │   └── index.js
    │   │   ├── calendar/
    │   │   │   ├── CalendarEventsBodyRightDrawer.js
    │   │   │   └── index.js
    │   │   ├── charts/
    │   │   │   ├── components/
    │   │   │   │   ├── BarChart.js
    │   │   │   │   ├── DoughnutChart.js
    │   │   │   │   ├── LineChart.js
    │   │   │   │   ├── PieChart.js
    │   │   │   │   ├── ScatterChart.js
    │   │   │   │   └── StackBarChart.js
    │   │   │   └── index.js
    │   │   ├── chatbot/
    │   │   │   ├── ChatBot.js
    │   │   │   └── components/
    │   │   │       ├── ChatUI.js
    │   │   │       └── chatui-theme.css
    │   │   ├── common/
    │   │   │   ├── components/
    │   │   │   │   ├── ConfirmationModalBody.js
    │   │   │   │   └── NotificationBodyRightDrawer.js
    │   │   │   ├── headerSlice.js
    │   │   │   ├── modalSlice.js
    │   │   │   └── rightDrawerSlice.js
    │   │   ├── dashboard/
    │   │   │   ├── components/
    │   │   │   │   ├── AmountStats.js
    │   │   │   │   ├── BarChart.js
    │   │   │   │   ├── DashboardStats.js
    │   │   │   │   ├── DashboardTopBar.js
    │   │   │   │   ├── DoughnutChart.js
    │   │   │   │   ├── Footprint.js
    │   │   │   │   ├── Importance.js
    │   │   │   │   ├── LineChart.js
    │   │   │   │   ├── PageStats.js
    │   │   │   │   ├── ScatterChart.js
    │   │   │   │   ├── Trajectory.js
    │   │   │   │   ├── UserChannels.js
    │   │   │   │   └── my_theme.json
    │   │   │   └── index.js
    │   │   ├── documentation/
    │   │   │   ├── DocComponents.js
    │   │   │   ├── DocFeatures.js
    │   │   │   ├── DocGettingStarted.js
    │   │   │   └── components/
    │   │   │       ├── DocComponentsContent.js
    │   │   │       ├── DocComponentsNav.js
    │   │   │       ├── FeaturesContent.js
    │   │   │       ├── FeaturesNav.js
    │   │   │       ├── GettingStartedContent.js
    │   │   │       └── GettingStartedNav.js
    │   │   ├── experiment/
    │   │   │   ├── components/
    │   │   │   │   ├── DashboardStats.js
    │   │   │   │   ├── SearchData.js
    │   │   │   │   ├── SelectAlgorithm.js
    │   │   │   │   ├── SelectData.js
    │   │   │   │   └── SelectTask.js
    │   │   │   └── index.js
    │   │   ├── integration/
    │   │   │   └── index.js
    │   │   ├── leads/
    │   │   │   ├── components/
    │   │   │   │   └── AddLeadModalBody.js
    │   │   │   ├── index.js
    │   │   │   └── leadSlice.js
    │   │   ├── run/
    │   │   │   ├── components/
    │   │   │   │   ├── DataTable.js
    │   │   │   │   ├── OptTable.js
    │   │   │   │   ├── Run.js
    │   │   │   │   ├── RunProgress.js
    │   │   │   │   └── TaskTable.js
    │   │   │   └── index.js
    │   │   ├── seldata/
    │   │   │   ├── components/
    │   │   │   │   ├── DataTable.js
    │   │   │   │   ├── SearchData.js
    │   │   │   │   ├── SelectData.css
    │   │   │   │   └── SelectData.js
    │   │   │   └── index.js
    │   │   ├── settings/
    │   │   │   ├── billing/
    │   │   │   │   └── index.js
    │   │   │   ├── profilesettings/
    │   │   │   │   └── index.js
    │   │   │   └── team/
    │   │   │       └── index.js
    │   │   ├── transactions/
    │   │   │   └── index.js
    │   │   └── user/
    │   │       ├── ForgotPassword.js
    │   │       ├── LandingIntro.js
    │   │       ├── Login.js
    │   │       ├── Register.js
    │   │       └── components/
    │   │           └── TemplatePointers.js
    │   ├── index.css
    │   ├── index.js
    │   ├── pages/
    │   │   ├── GettingStarted.js
    │   │   └── protected/
    │   │       ├── 404.js
    │   │       ├── Algorithm.js
    │   │       ├── Analytics.js
    │   │       ├── Bills.js
    │   │       ├── Blank.js
    │   │       ├── Calendar.js
    │   │       ├── Charts.js
    │   │       ├── ChatOpt.js
    │   │       ├── Dashboard.js
    │   │       ├── Experiment.js
    │   │       ├── Integration.js
    │   │       ├── Leads.js
    │   │       ├── ProfileSettings.js
    │   │       ├── Run.js
    │   │       ├── Seldata.js
    │   │       ├── Team.js
    │   │       ├── Transactions.js
    │   │       └── Welcome.js
    │   ├── reportWebVitals.js
    │   ├── routes/
    │   │   ├── index.js
    │   │   └── sidebar.js
    │   ├── setupTests.js
    │   └── utils/
    │       ├── dummyData.js
    │       └── globalConstantUtil.js
    └── tailwind.config.js

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
data/
var/
log/
wheels/
*.egg-info/
.installed.cfg
*.egg


# PyInstaller
# 通常如果您使用PyInstaller,以下目录应该被忽略
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

.idea
.vscode

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

*.log
experiment_results/
collected_results/
demo/comparison/frames/
demo/comparison/gifs/
demo/comparison/pngs/
demo/comparison/htmls/
demo/draw/
demo/importances/pngs/
# (removed stray markdown code-fence accidentally pasted into this .gitignore)

**/__pycache__/

run.sh
run1.sh
fetch_data.py
test.sh
sample_points.py

================================================
FILE: LICENSE
================================================
BSD 3-Clause License

Copyright (c) 2023, peilimao
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


================================================
FILE: MANIFEST.in
================================================


================================================
FILE: README.md
================================================
<p align="center">
  <a href="https://maopl.github.io/TransOpt-doc/">
    <img src="./docs/source/_static/figures/transopt_logo.jpg" alt="" width="40%" align="top">
  </a>
</p>
<p align="center">
  TransOPT: Transfer Optimization System for Bayesian Optimization Using Transfer Learning<br>
  <a href="https://maopl.github.io/TransOpt-doc/">Docs</a> |
  <a href="https://maopl.github.io/TransOpt-doc/quickstart.html">Tutorials</a> |
  <a href="https://maopl.github.io/TransOpt-doc/usage/problems.html">Examples</a> |
  <a href="">Paper</a> |
  <a href="https://maopl.github.io/TransOpt-doc">Citation</a> |
</p>

<div align="center">

<a href="https://github.com/psf/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
<a href="https://github.com/psf/black"><img alt="Code style: black" src="https://img.shields.io/badge/python_version-3.10-purple"></a>

</div>


# Welcome to TransOPT!

**TransOPT** is an open-source software platform designed to facilitate the **design, benchmarking, and application of transfer learning for Bayesian optimization (TLBO)** algorithms through a modular, data-centric framework.

## Features

- **Contains more than 1000 benchmark problems covering a diverse range of domains**.  
- **Build custom optimization algorithms as easily as stacking building blocks**.  
- **Leverage historical data to achieve more efficient and informed optimization**.  
- **Deploy experiments through an intuitive web UI and monitor results in real-time**.

TransOPT empowers researchers and developers to explore innovative optimization solutions effortlessly, bridging the gap between theory and practical application.

# [Installation: how to install TransOPT](https://maopl.github.io/TransOpt-doc/installation.html)

TransOPT is composed of two main components: the backend for data processing and business logic, and the frontend for user interaction. Each can be installed as follows:

### Prerequisites

Before installing TransOPT, you must have the following installed:

- **Python 3.10+**: Ensure Python is installed.
- **Node.js 17.9.1+ and npm 8.11.0+**: These are required to install and build the frontend. [Download Node.js](https://nodejs.org/en/download/)

Please install these prerequisites if they are not already installed on your system.

1. Clone the repository:
   ```shell
   $ git clone https://github.com/maopl/TransOpt.git
   ```

2. Install the required dependencies:
   ```shell
   $ cd TransOpt
   $ python setup.py install
   ```

3. Install the frontend dependencies:
   ```shell
   $ cd webui && npm install
   ```

### Start the Backend Agent

To start the backend agent, use the following command:

```bash
$ python transopt/agent/app.py
```

### Web User Interface Mode

When TransOPT has been started successfully, go to the webui directory and start the web UI on your local machine. Enable the user interface mode with the following command:
```bash
cd webui && npm start
```

This will open the TransOPT interface in your default web browser at `http://localhost:3000`.


### Command Line Mode

In addition to the web UI mode, TransOPT also offers a Command Line (CMD) mode for users who may not have access to a display screen, such as when working on a remote server.

To run TransOPT in CMD mode, use the following command:

```bash
python transopt/agent/run_cli.py -n Sphere -v 3 -o 1 -m RF -acf UCB -b 300
```

This command sets up a task named Sphere with 3 variables and 1 objective, using a Random Forest (RF) as the surrogate model and the upper confidence bound (UCB) acquisition function, with a budget of 300 function evaluations.

For a complete list of available options and more detailed usage instructions, please refer to the [CLI documentation](https://maopl.github.io/TransOpt-doc/usage/cli.html).


# [Documentation: The TransOPT Process](https://maopl.github.io/TransOpt-doc/)

Our docs walk you through using TransOPT, the web UI, and the key API entry points. For an overview of the system and the workflow for project management, see our [documentation](https://maopl.github.io/TransOpt-doc/).


<p align="center">
<img src="./docs/source/_static/figures/Transopt_workflow.png" width="95%">
</p>


# Why use TransOPT?

In recent years, Bayesian optimization (BO) has been widely used in various fields, such as hyperparameter optimization, molecular design, and synthetic biology. However, conventional BO is not very efficient, as it conducts every optimization task from scratch while ignoring the experience gained from previous problem-solving practices. To address this challenge, transfer learning (TL) has been introduced to BO, aiming to leverage auxiliary data to improve optimization efficiency and performance. Despite its potential, the adoption of TLBO is still limited due to the complexity of advanced TLBO methods. TransOPT is a system that facilitates:

- development of TLBO algorithms;
- benchmarking the performance of TLBO methods;
- applications of TLBO for downstream tasks;

<p align="center">
<img src="./docs/source/_static/figures/Results.png" width="95%">
</p>

**Upper-left:** illustrates the use of a web UI to construct new optimization algorithms by combining different components. **Upper-right:** highlights the application of an LLM agent to effectively manage optimization tasks. **Middle:** shows various visualization results derived from the optimization processes. **Lower:** presents a performance comparison of different TLBO methods.






# Reference & Citation

If you find our work helpful to your research, please consider citing our:

```bibtex
@article{TransOPT,
  title = {{TransOPT}: Transfer Optimization System for Bayesian Optimization Using Transfer Learning},
  author = {Author Name and Collaborator Name},
  url = {https://github.com/maopl/TransOPT},
  year = {2024}
}
```





================================================
FILE: demo/analysis.py
================================================
import logging
import os
import argparse

from pathlib import Path
from transopt.ResultAnalysis.AnalysisPipeline import analysis_pipeline


def run_analysis(Exper_folder: Path, tasks, methods, seeds, args):
    """Run the result-analysis pipeline for one experiment folder.

    Thin wrapper that forwards every argument to ``analysis_pipeline``.

    Args:
        Exper_folder: Root folder holding the experiment's result files.
        tasks: Mapping of task name -> task configuration dict.
        methods: Collection of method/algorithm names to analyse.
        seeds: Iterable of random seeds whose runs should be included.
        args: Parsed command-line namespace with analysis switches.
    """
    # The original also created an unused module logger here; removed as dead code.
    analysis_pipeline(Exper_folder, tasks=tasks, methods=methods, seeds=seeds, args=args)




if __name__ == '__main__':
    # Task configurations to analyse; commented entries are kept as examples.
    tasks = {
        # 'cp': {'budget': 8, 'time_stamp': 2, 'params': {'input_dim': 2}},
        'Ackley': {'budget': 11, 'time_stamp': 3, 'params': {'input_dim': 1}},
        # 'MPB': {'budget': 110, 'time_stamp': 3},
        # 'Griewank': {'budget': 11, 'time_stamp': 3,  'params':{'input_dim':1}},
        # 'DixonPrice': {'budget': 110, 'time_stamp': 3},
        # 'Lunar': {'budget': 110, 'time_stamp': 3},
        # 'XGB': {'budget': 110, 'time_stamp': 3},
    }
    Methods_list = {'MTBO', 'BO'}
    Seeds = [1, 2, 3, 4, 5]

    def str2bool(value):
        # argparse's ``type=bool`` treats EVERY non-empty string (including
        # "False") as True; parse common truthy spellings explicitly instead.
        return str(value).lower() in ('1', 'true', 'yes', 'y', 't')

    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument("-in", "--init_number", type=int, default=0)
    parser.add_argument("-p", "--exp_path", type=str, default='../LFL_experiments')
    # Experiment name; results are stored under the experiments folder.
    parser.add_argument("-n", "--exp_name", type=str, default='test')
    parser.add_argument("-c", "--comparision", type=str2bool, default=True)
    parser.add_argument("-a", "--track", type=str2bool, default=True)
    parser.add_argument("-r", "--report", type=str2bool, default=False)

    args = parser.parse_args()
    # Join the experiment path and name with pathlib instead of str.format.
    Exper_folder = Path(args.exp_path) / args.exp_name
    run_analysis(Exper_folder, tasks=tasks, methods=Methods_list, seeds=Seeds, args=args)



================================================
FILE: demo/causal_analysis.py
================================================
import logging
import os
import argparse

from pathlib import Path
from transopt.ResultAnalysis.AnalysisPipeline import analysis_pipeline


def run_analysis(Exper_folder: Path, tasks, methods, seeds, args):
    """Run the result-analysis pipeline for one experiment folder.

    Thin wrapper that forwards every argument to ``analysis_pipeline``.

    Args:
        Exper_folder: Root folder holding the experiment's result files.
        tasks: Mapping of task name -> task configuration dict.
        methods: Collection of method/algorithm names to analyse.
        seeds: Iterable of random seeds whose runs should be included.
        args: Parsed command-line namespace with analysis switches.
    """
    # The original also created an unused module logger here; removed as dead code.
    analysis_pipeline(Exper_folder, tasks=tasks, methods=methods, seeds=seeds, args=args)



if __name__ == '__main__':
    # NOTE(review): ``samples_num``, ``split_index``, ``split_into_segments``
    # and ``CompilerBenchmarkBase`` are not defined or imported anywhere in
    # this file — this script cannot run as-is. TODO: confirm where they are
    # meant to come from (likely transopt.benchmark) and import them.
    #
    # The original also built an identical ``tasks`` dict BEFORE ``workloads``
    # was defined (guaranteed NameError) and then overwrote it; that dead
    # first assignment has been removed.
    available_workloads = CompilerBenchmarkBase.AVAILABLE_WORKLOADS
    split_workloads = split_into_segments(available_workloads, 10)

    if split_index >= len(split_workloads):
        raise IndexError("split index out of range")

    workloads = split_workloads[split_index]

    tasks = {
        "GCC": {"budget": samples_num, "workloads": workloads},
        "LLVM": {"budget": samples_num, "workloads": workloads},
    }
    Methods_list = {'MTBO', 'BO'}
    Seeds = [1, 2, 3, 4, 5]

    def str2bool(value):
        # argparse's ``type=bool`` treats EVERY non-empty string (including
        # "False") as True; parse common truthy spellings explicitly instead.
        return str(value).lower() in ('1', 'true', 'yes', 'y', 't')

    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument("-in", "--init_number", type=int, default=0)
    parser.add_argument("-p", "--exp_path", type=str, default='../LFL_experiments')
    # Experiment name; results are stored under the experiments folder.
    parser.add_argument("-n", "--exp_name", type=str, default='test')
    parser.add_argument("-c", "--comparision", type=str2bool, default=True)
    parser.add_argument("-a", "--track", type=str2bool, default=True)
    parser.add_argument("-r", "--report", type=str2bool, default=False)

    args = parser.parse_args()
    # Join the experiment path and name with pathlib instead of str.format.
    Exper_folder = Path(args.exp_path) / args.exp_name
    run_analysis(Exper_folder, tasks=tasks, methods=Methods_list, seeds=Seeds, args=args)



================================================
FILE: demo/comparison/analysis_hypervolume.py
================================================
import sys
from pathlib import Path

current_path = Path(__file__).resolve().parent
package_path = current_path.parent.parent
sys.path.insert(0, str(package_path))

import json

import numpy as np
import pandas as pd
import scipy.stats

from transopt.utils.pareto import calc_hypervolume, find_pareto_front
from transopt.utils.plot import plot3D

# Which compiler's archived results to analyse: "gcc" or "llvm".
target = "gcc"

# Locations of the archived optimization results inside the repository.
results_path = package_path / "experiment_results"
gcc_results = results_path / "gcc_archive_new"
llvm_results = results_path / "llvm_archive"

# Algorithms, objective column names, and RNG seeds present in the archive.
algorithm_list = ["ParEGO", "SMSEGO", "MoeadEGO", "CauMO"]
objectives = ["execution_time", "file_size", "compilation_time"]
seed_list = [65535, 65536, 65537, 65538, 65539]


def load_and_prepare_data(file_path, objectives):
    """
    Read one archived result JSON and return a cleaned DataFrame.

    The file stores a single record under key "1" with parallel
    "input_vector" / "output_value" lists. The input columns and the selected
    objective columns are joined side by side; rows whose input configuration
    is duplicated are dropped (first occurrence kept), and every row where an
    objective equals 1e10 is discarded (1e10 appears to mark failed
    evaluations — TODO confirm against the result writer).
    """
    with open(file_path, "r") as f:
        record = json.load(f).get("1", {})

    inputs = pd.DataFrame(record["input_vector"])
    outputs = pd.DataFrame(record["output_value"])[objectives]
    combined = pd.concat([inputs, outputs], axis=1)

    # Keep only the first row for each distinct input configuration.
    combined = combined.drop_duplicates(subset=inputs.columns.tolist())

    # Remove rows carrying the 1e10 value in any objective column.
    for objective in objectives:
        combined = combined[combined[objective] != 1e10]

    return combined

def load_data(workload, algorithm, seed):
    """Load the cleaned result DataFrame for one (workload, algorithm, seed) run.

    The archive root and file-name prefix are chosen by the module-level
    ``target`` setting ("llvm" selects the LLVM archive, anything else GCC).
    """
    if target == "llvm":
        base, prefix = llvm_results, "llvm"
    else:
        base, prefix = gcc_results, "gcc"
    result_file = base / f"{prefix}_{workload}" / algorithm / f"{seed}_KB.json"
    return load_and_prepare_data(result_file, objectives)

def collect_all_data(workload):
    """Stack the objective values of every (algorithm, seed) run for a workload.

    Returns:
        tuple: (all_points, global_mean, global_std) where ``all_points`` is a
        2-D array with one row per evaluated configuration and one column per
        objective, and the mean/std are taken column-wise over all rows.
    """
    stacked = np.vstack([
        load_data(workload, alg, sd)[objectives].values
        for alg in algorithm_list
        for sd in seed_list
    ])
    return stacked, stacked.mean(axis=0), stacked.std(axis=0)


def calculate_mean_hypervolume(
    algorithm, workload, global_stats1, global_stats2, normalization_type="min-max"
):
    """
    Calculate mean hypervolume for a given algorithm across all seeds.

    Parameters:
    global_stats1: Global mean or min of all objectives (depending on normalization_type)
    global_stats2: Global std or max of all objectives (depending on normalization_type)
    normalization_type: 'min-max' or 'mean' for different types of normalization

    Delegates the per-seed work to ``calculate_hypervolumes`` — the original
    duplicated that function's entire normalization / Pareto-front loop here.
    """
    return np.mean(
        calculate_hypervolumes(
            algorithm, workload, global_stats1, global_stats2, normalization_type
        )
    )


def calculate_hypervolumes(
    algorithm, workload, global_stats1, global_stats2, normalization_type="min-max"
):
    """Per-seed hypervolumes of *algorithm* on *workload*.

    Parameters:
    global_stats1: global mean (for 'mean') or global min (for 'min-max')
    global_stats2: global std  (for 'mean') or global max (for 'min-max')
    normalization_type: 'mean' or 'min-max'

    Returns:
    A list with one hypervolume value per seed in ``seed_list``.

    Raises:
    ValueError: if normalization_type is neither 'mean' nor 'min-max'.
    """
    # All objectives are normalized into [0, 1]-ish space, so the all-ones
    # vector serves as the hypervolume reference point.
    reference_point = np.ones(len(objectives))

    hypervolumes = []
    for seed in seed_list:
        raw = load_data(workload, algorithm, seed)[objectives]

        if normalization_type == "mean":
            scaled = (raw - global_stats1) / global_stats2
        elif normalization_type == "min-max":
            scaled = (raw - global_stats1) / (global_stats2 - global_stats1)
        else:
            raise ValueError("Unsupported normalization type. Choose 'mean' or 'min-max'.")

        front = find_pareto_front(scaled.values)
        hypervolumes.append(calc_hypervolume(front, reference_point))

    return hypervolumes

def analyze_and_compare_algorithms(workload_results):
    """Summarise hypervolume results per workload.

    For every workload: record the mean and standard deviation of each
    algorithm's hypervolumes, pick the best algorithm (highest mean), and run
    a Mann-Whitney U test of every other algorithm against that best one.
    A '+' marks significance at the 5% level, '-' otherwise.

    Parameters:
    workload_results: mapping {workload: {algorithm: [hypervolume, ...]}}

    Returns:
    {workload: {'means': ..., 'std_devs': ..., 'significance': ...}}
    """
    summary = {}

    for workload, per_algorithm in workload_results.items():
        means = {alg: np.mean(hvs) for alg, hvs in per_algorithm.items()}
        std_devs = {alg: np.std(hvs) for alg, hvs in per_algorithm.items()}

        # Reference algorithm: the one with the highest mean hypervolume
        # (ties broken by iteration order, matching a strict ">" scan).
        best = max(means, key=means.get)

        significance = {}
        for alg, hvs in per_algorithm.items():
            if alg == best:
                continue
            _, p_value = scipy.stats.mannwhitneyu(per_algorithm[best], hvs)
            significance[f"{alg} vs {best}"] = '+' if p_value < 0.05 else '-'

        summary[workload] = {
            'means': means,
            'std_devs': std_devs,
            'significance': significance,
        }

    return summary

def matrix_to_latex(analysis_results, caption):
    """Render the analysis results as a standalone LaTeX document.

    One row per workload, one column per algorithm. The cell of the best
    algorithm (highest mean hypervolume) is shaded and bold; every other
    algorithm whose comparison against the best is significant carries a
    dagger mark.

    Parameters:
    analysis_results: mapping produced by analyze_and_compare_algorithms
    caption: table caption text

    Returns:
    The full LaTeX source as a single string.
    """
    latex_code = []

    # Preamble and table header.
    latex_code.extend([
        "\\documentclass{article}",
        "\\usepackage{geometry}",
        "\\geometry{a4paper, margin=1in}",
        "\\usepackage{graphicx}",
        "\\usepackage{colortbl}",
        "\\usepackage{booktabs}",
        "\\usepackage{threeparttable}",
        "\\usepackage{caption}",
        "\\usepackage{xcolor}",
        "\\pagestyle{empty}",
        "\\begin{document}",
        "\\begin{table*}[t!]",
        "    \\scriptsize",
        "    \\centering",
        f"    \\caption{{{caption}}}",
        "    \\resizebox{1.0\\textwidth}{!}{",
        "    \\begin{tabular}{c|" + "".join(["c"] * len(analysis_results)) + "}",
        "        \\hline"
    ])

    # Column headers: one column per algorithm (taken from the first workload).
    algorithms = list(analysis_results[next(iter(analysis_results))]['means'].keys())
    col_header = " & ".join([""] + [f"\\texttt{{{algorithm}}}" for algorithm in algorithms]) + " \\\\"
    latex_code.append("        " + col_header)
    latex_code.append("        \\hline")

    # One table row per workload.
    for workload, results in analysis_results.items():
        row_data = [workload]
        best_algorithm = max(results['means'], key=results['means'].get)
        for algorithm in results['means'].keys():
            mean = results['means'][algorithm]
            std_dev = results['std_devs'][algorithm]

            # Look up the significance entry by its exact key instead of the
            # old substring scan, which could match the wrong comparison when
            # one algorithm name is contained in another.
            significance_mark = ""
            if algorithm != best_algorithm:
                if results['significance'].get(f"{algorithm} vs {best_algorithm}") == '+':
                    significance_mark = "$^\\dagger$"

            if algorithm == best_algorithm:
                row_data.append(f"\\cellcolor[rgb]{{.682, .667, .667}}\\textbf{{{mean:.3f} (±{std_dev:.3f})}}{significance_mark}")
            else:
                row_data.append(f"{mean:.3f} (±{std_dev:.3f}){significance_mark}")

        latex_code.append("        " + " & ".join(row_data) + " \\\\")

    # Table footer and notes. (The original was missing a comma after the
    # \item line, silently concatenating it with the \end{tablenotes} line,
    # and the note mis-named the statistical test used.)
    latex_code.extend([
        "        \\hline",
        "    \\end{tabular}",
        "    }",
        "    \\begin{tablenotes}",
        "        \\tiny",
        "        \\item $^\\dagger$ indicates that the best algorithm is significantly better than the other one according to the Mann-Whitney U test at a 5\\% significance level.",
        "    \\end{tablenotes}",
        "\\end{table*}%",
        "\\end{document}"
    ])

    return "\n".join(latex_code)



def load_workloads():
    """Return the workload names (top-level JSON keys) for the current target."""
    features_file = package_path / "demo" / "comparison" / f"features_by_workload_{target}.json"
    with features_file.open("r") as fh:
        return json.load(fh).keys()


if __name__ == "__main__":
    # Analyse only the first 14 workloads in alphabetical order.
    workloads = sorted(load_workloads())[:14]

    # workload -> {algorithm -> [per-seed hypervolume]}
    workload_results = {}
    for workload in workloads:
        print(f"Processing workload: {workload}")
        # Pool every run of every algorithm to get workload-global bounds for
        # a shared min-max normalization.
        all_data, global_mean, global_std = collect_all_data(workload)
        global_max = all_data.max(axis=0)
        global_min = all_data.min(axis=0)

        algorithm_results = {}
        for algorithm in algorithm_list:
            hypervolumes = calculate_hypervolumes(
                algorithm,
                workload,
                global_min,
                global_max,
                normalization_type="min-max",
            )
            algorithm_results[algorithm] = hypervolumes

        # Drop the first 7 characters of the name (e.g. the "cbench-" prefix)
        # — assumes every workload name carries such a prefix; TODO confirm.
        workload_short_name = workload[7:]
        workload_results[workload_short_name] = algorithm_results

    final_results = analyze_and_compare_algorithms(workload_results)
    print(final_results)

    caption = "Performance Comparison of Algorithms"
    latex_table = matrix_to_latex(final_results, caption)

    # Written to the current working directory.
    latex_table_path = "latex_table.tex"
    with open(latex_table_path, 'w') as file:
        file.write(latex_table)

================================================
FILE: demo/comparison/analysis_plot.py
================================================
import sys
from pathlib import Path

current_path = Path(__file__).resolve().parent
package_path = current_path.parent.parent
sys.path.insert(0, str(package_path))

import json
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# import plotly.graph_objects as go
from matplotlib.animation import FuncAnimation
from mpl_toolkits.mplot3d import Axes3D

from transopt.utils.pareto import calc_hypervolume, find_pareto_front
from transopt.utils.plot import plot3D

# Which system's results to analyse; load_data() switches on "llvm" vs anything else.
target = "gcc"
results_path = package_path / "experiment_results"
# NOTE(review): "comparsion" looks misspelled but presumably matches the
# on-disk directory names — do not "fix" the spelling here alone.
gcc_results_path = results_path / "gcc_comparsion"
gcc_samples_path = results_path / "gcc_samples"
llvm_results = results_path / "llvm_comparsion"
llvm_samples_path = results_path / "llvm_samples"

dbms_samples_path = results_path / "dbms_samples"

# Optimizers whose result files are loaded/plotted.
algorithm_list = ["ParEGO", "SMSEGO", "MoeadEGO", "CauMO"]
# algorithm_list = ["SMSEGO"]
# objectives = ["execution_time", "file_size", "compilation_time"]
# NOTE(review): only two objectives are active here, but several 3D plotting
# helpers below index objectives[2] and would fail with this configuration.
objectives = ["latency", "throughput"]
# One entry per repeated run (random seed) of each experiment.
seed_list = [65535, 65536, 65537, 65538, 65539]


def load_and_prepare_data(file_path):
    """Load a result JSON file and return a cleaned DataFrame.

    Joins the recorded input vectors with the objective columns, drops rows
    whose configuration (input columns) is duplicated, and removes rows where
    any objective equals the 1e10 failure sentinel.
    """
    with open(file_path, "r") as fh:
        raw = json.load(fh)
    # Some result files nest the payload under a "1" key.
    if "1" in raw:
        raw = raw["1"]

    inputs = pd.DataFrame(raw["input_vector"])
    outputs = pd.DataFrame(raw["output_value"])[objectives]
    combined = pd.concat([inputs, outputs], axis=1)
    print(f"Loaded {len(combined)} data points")

    # De-duplicate on the configuration columns only.
    combined = combined.drop_duplicates(subset=inputs.columns.tolist())

    # 1e10 is used as a sentinel for failed evaluations — drop those rows.
    for objective in objectives:
        combined = combined[combined[objective] != 1e10]

    print(f"Loaded {len(combined)} data points, removed {len(inputs) - len(combined)} duplicates")
    print()
    return combined

def load_data(workload, algorithm, seed):
    """Load one run's results for the given (workload, algorithm, seed)."""
    if target == "llvm":
        run_dir = llvm_results / f"llvm_{workload}"
    else:
        run_dir = gcc_results_path / f"gcc_{workload}"
    return load_and_prepare_data(run_dir / algorithm / f"{seed}_KB.json")

def collect_all_data(workload):
    """Gather objective values from every (algorithm, seed) run of *workload*.

    Returns (all_data, global_mean, global_std), where statistics are taken
    column-wise over all runs combined.
    """
    chunks = []
    for algorithm in algorithm_list:
        chunks.extend(
            load_data(workload, algorithm, seed)[objectives].values
            for seed in seed_list
        )
    combined = np.vstack(chunks)
    return combined, combined.mean(axis=0), combined.std(axis=0)


def dynamic_plot(workload, algorithm, seed):
    """
    Dynamically plot the three objectives for a given workload and algorithm for a specific seed.

    Builds a 3D animation where each frame adds one more evaluated point
    (blue = already seen, red = newest) and saves it as a GIF via imagemagick.

    NOTE(review): indexes objectives[0..2]; with the current two-element
    `objectives` list this would fail — confirm before use. Also assumes the
    demo/comparison/gifs directory already exists (not created here).
    """
    # Collect all data to understand the range
    all_data, global_mean, global_std = collect_all_data(workload)
    global_min = np.min(all_data, axis=0)
    global_max = np.max(all_data, axis=0)
   
    # Load data for the specific seed
    df = load_data(workload, algorithm, seed)
    
    # Normalize data (Min-Max normalization)
    df_normalized = (df[objectives] - global_min) / (global_max - global_min)
     
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_title(f"Dynamic Plot for {workload} - {algorithm} - Seed {seed}")
    ax.set_xlabel(objectives[0])
    ax.set_ylabel(objectives[1])
    ax.set_zlabel(objectives[2])

    # Initialize two scatter plots: one for all previous points, one for the new point
    previous_points = ax.scatter([], [], [], c='b', marker='o')  # all previous points in blue
    current_point = ax.scatter([], [], [], c='r', marker='o')  # current point in red
    
    # NOTE(review): init() is defined but never passed to FuncAnimation as
    # init_func, so it is currently unused.
    def init():
        # _offsets3d is a private Matplotlib attribute of 3D scatter artists;
        # it may break across Matplotlib versions.
        previous_points._offsets3d = ([], [], [])
        current_point._offsets3d = ([], [], [])
        return previous_points, current_point

    def update(frame):
        # Add all previous points up to the current frame
        previous_points._offsets3d = (df_normalized.iloc[:frame][objectives[0]].values,
                                      df_normalized.iloc[:frame][objectives[1]].values,
                                      df_normalized.iloc[:frame][objectives[2]].values)

        # Add the current point (latest one in the sequence)
        current_point._offsets3d = (df_normalized.iloc[frame:frame+1][objectives[0]].values,
                                    df_normalized.iloc[frame:frame+1][objectives[1]].values,
                                    df_normalized.iloc[frame:frame+1][objectives[2]].values)
        return previous_points, current_point
    
    # One animation frame per evaluated configuration.
    frames = len(df)
    ani = FuncAnimation(fig, update, frames=frames, blit=False, repeat=False)
    
    # Save the plot to a file
    gif_path = package_path / "demo" / "comparison" / "gifs" / f"{target}_{algorithm}_{workload}_{seed}.gif"
    ani.save(gif_path, writer='imagemagick')
    plt.close(fig)  # Close the plot to free memory


def dynamic_plot_html(workload, algorithm, seed):
    """
    Dynamically plot the three objectives for a given workload and algorithm for a specific seed using Plotly.

    NOTE(review): the original commented out only the `def` line, leaving the
    indented body at module level — an IndentationError that prevented the
    whole module from importing. The definition is restored instead; calling
    it requires re-enabling the `plotly.graph_objects as go` import.
    """
    # Collect all data to understand the range
    all_data, global_mean, global_std = collect_all_data(workload)
    global_min = np.min(all_data, axis=0)
    global_max = np.max(all_data, axis=0)
   
    # Load data for the specific seed
    df = load_data(workload, algorithm, seed)
    
    # Normalize data (Min-Max normalization)
    df_normalized = (df[objectives] - global_min) / (global_max - global_min)
    df_normalized = df_normalized

    # Keep only the Pareto-optimal rows for plotting.
    pareto_front, pareto_front_index = find_pareto_front(df_normalized.values, return_index=True)
    df_normalized = df_normalized.iloc[pareto_front_index]
    
    # Create traces for previous and current points
    trace1 = go.Scatter3d(x=[], y=[], z=[], mode='markers', marker=dict(size=5, color='blue'))
    trace2 = go.Scatter3d(x=[], y=[], z=[], mode='markers', marker=dict(size=5, color='red'))

    # Combine traces into a data list
    data = [trace1, trace2]

    # Create the layout of the plot
    layout = go.Layout(
        title=f"Dynamic Plot for {workload} - {algorithm} - Seed {seed}",
        scene=dict(
            xaxis=dict(title=objectives[0], range=[0, 1]),
            yaxis=dict(title=objectives[1], range=[0, 1]),
            zaxis=dict(title=objectives[2], range=[0, 1])
        )
    )
    
    # Create the figure
    fig = go.Figure(data=data, layout=layout)

    # Create frames for the animation
    frames = []
    for t in range(len(df)):
        frame = go.Frame(
            data=[
                go.Scatter3d(
                    x=df_normalized.iloc[:t+1][objectives[0]].values,
                    y=df_normalized.iloc[:t+1][objectives[1]].values,
                    z=df_normalized.iloc[:t+1][objectives[2]].values,
                    mode='markers',
                    marker=dict(size=5, color='blue')
                ),
                go.Scatter3d(
                    x=df_normalized.iloc[t:t+1][objectives[0]].values,
                    y=df_normalized.iloc[t:t+1][objectives[1]].values,
                    z=df_normalized.iloc[t:t+1][objectives[2]].values,
                    mode='markers',
                    marker=dict(size=5, color='red')
                )
            ]
        )
        frames.append(frame)

    fig.frames = frames
   
    prev_frame_button = dict(
        args=[None, {"frame": {"duration": 0, "redraw": False}, "mode": "immediate", "transition": {"duration": 0}}],
        label='Previous',
        method='animate'
    )

    next_frame_button = dict(
        args=[None, {"frame": {"duration": 0, "redraw": False}, "mode": "immediate", "transition": {"duration": 0}}],
        label='Next',
        method='animate'
    )

    fig.update_layout(
        updatemenus=[dict(
            type='buttons',
            showactive=False,
            y=0,
            x=1.05,
            xanchor='right',
            yanchor='top',
            pad=dict(t=0, r=10),
            buttons=[prev_frame_button, next_frame_button]
        )]
    )
 
    # fig.update_layout(sliders=sliders)

    # Save the plot to HTML file
    html_path = package_path / "demo" / "comparison" / "htmls" / f"dynamic_{target}_{algorithm}_{workload}_{seed}.html"
    fig.write_html(str(html_path))


def save_individual_frames(workload, algorithm, seed):
    """
    Save each frame of the three objectives as a separate plot for a given workload, algorithm, and seed.
    """
    df = load_data(workload, algorithm, seed)

    # Directory that will hold one PNG per evaluated configuration.
    frames_dir = package_path / "demo" / "comparison" / "frames" / f"{algorithm}_{workload}_{seed}"
    frames_dir.mkdir(parents=True, exist_ok=True)

    for idx in range(len(df)):
        fig = plt.figure()
        axes = fig.add_subplot(111, projection='3d')

        # One data point per frame, taken row by row from the DataFrame.
        row = df.iloc[idx]
        axes.scatter(row[objectives[0]], row[objectives[1]], row[objectives[2]], color='r')
        axes.set_title(f"Frame {idx} for {workload} - {algorithm} - Seed {seed}")
        axes.set_xlabel(objectives[0])
        axes.set_ylabel(objectives[1])
        axes.set_zlabel(objectives[2])

        # Zero-padded index keeps the frames sorted lexicographically.
        plt.savefig(frames_dir / f"frame_{idx:04d}.png")
        plt.close(fig)  # free the figure's memory
        

def load_workloads():
    """Return the workload names (top-level keys) from the per-target feature JSON."""
    features_file = package_path / "demo" / "comparison" / f"features_by_workload_{target}.json"
    with features_file.open("r") as fh:
        return json.load(fh).keys()

def plot_pareto_front_html(workload):
    """Plot the normalized Pareto front of a sample file as an interactive HTML page.

    NOTE(review): the original commented out only the `def` line, leaving the
    indented body at module level — an IndentationError that prevented the
    whole module from importing. The definition is restored instead; calling
    it requires re-enabling the `plotly.graph_objects as go` import.
    """
    # df = load_and_prepare_data(gcc_samples_path / f"GCC_{workload}.json")
    df = load_and_prepare_data(llvm_samples_path / f"LLVM_{workload}.json")
    df_normalized = (df - df.min()) / (df.max() - df.min())
    _, pareto_indices = find_pareto_front(df_normalized[objectives].values, return_index=True)
    
    # Retrieve Pareto points
    pareto_points = df_normalized.iloc[pareto_indices][objectives]
    
    # Create a 3D scatter plot using plotly
    fig = go.Figure(data=[go.Scatter3d(
        x=pareto_points[objectives[0]],
        y=pareto_points[objectives[1]],
        z=pareto_points[objectives[2]],
        mode='markers',
        marker=dict(
            size=5,
            color='blue',  # set color to blue
            opacity=0.8
        )
    )])

    # Update the layout
    fig.update_layout(
        title=f"Pareto Front for {workload}",
        scene=dict(
            xaxis_title=objectives[0],
            yaxis_title=objectives[1],
            zaxis_title=objectives[2]
        )
    )

    # Define the path for HTML file
    html_path = package_path / "demo" / "comparison" / "htmls"
    # Ensure the directory exists
    html_path.mkdir(parents=True, exist_ok=True)

    # Save the plot as an HTML file
    fig.write_html(str(html_path / f"{target}_pareto_front_{workload}.html"))


def plot_pareto_front(workload):
    """Plot the normalized Pareto front of one ParEGO run (seed 65535) as a PNG."""
    # df = load_and_prepare_data(gcc_samples_path / f"GCC_{workload}.json")
    # df = load_and_prepare_data(llvm_samples_path / f"LLVM_{workload}.json")
    df = load_data(workload, "ParEGO", 65535)
    df_normalized = (df - df.min()) / (df.max() - df.min())
    _, pareto_indices = find_pareto_front(df_normalized[objectives].values, return_index=True)

    # Keep only the Pareto-optimal rows, objective columns only.
    points = df_normalized.iloc[pareto_indices][objectives]

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_title(f"Pareto Front for {workload}")
    ax.set_xlabel(objectives[0])
    ax.set_ylabel(objectives[1])
    ax.set_zlabel(objectives[2])

    # Plot from NumPy arrays rather than pandas Series.
    ax.scatter(
        points[objectives[0]].values,
        points[objectives[1]].values,
        points[objectives[2]].values,
        c='b',
        marker='o',
    )

    # Save the plot as a file, then release the figure's memory.
    file_path = package_path / "demo" / "comparison" / "pngs" / f"{target}_pf_{workload}.png"
    plt.savefig(file_path)
    plt.close(fig)
    
    
def plot_all(workload, algorithm=""):
    """Scatter-plot all DBMS samples of *workload* in 3D objective space.

    Saves the figure under demo/comparison/pngs. The *algorithm* parameter is
    unused on the active path; it is kept for interface compatibility with the
    commented-out per-algorithm loading variants.

    NOTE(review): indexes objectives[0..2]; with the current two-element
    `objectives` list this would fail — confirm before use.
    """
    # df = load_and_prepare_data(llvm_samples_path / f"LLVM_{workload}.json")
    # df = load_and_prepare_data(gcc_samples_path / f"GCC_{workload}.json")
    # df = load_data(workload, algorithm, 65535)
    df = load_and_prepare_data(dbms_samples_path / f"DBMS_{workload}.json")
    # The original computed a min-max normalization here and then immediately
    # discarded it (df_normalized = df); the raw values are what gets plotted.
    points = df[objectives]

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_title(f"All samples for {workload}")
    ax.set_xlabel(objectives[0])
    ax.set_ylabel(objectives[1])
    ax.set_zlabel(objectives[2])

    ax.scatter(
        points[objectives[0]].values,
        points[objectives[1]].values,
        points[objectives[2]].values,
        c='b',
        marker='o',
    )

    # Save the plot as a file, then release the figure's memory.
    file_path = package_path / "demo" / "comparison" / "pngs" / f"{target}_{workload}.png"
    plt.savefig(file_path)
    plt.close(fig)
    
# 2D plot all
def plot_all_2d(workload, algorithm=""):
    """Scatter-plot all DBMS samples of *workload* in 2D objective space.

    Saves the figure under demo/comparison/pngs. The *algorithm* parameter is
    unused on the active path; it is kept for interface compatibility with the
    commented-out per-algorithm loading variants.
    """
    # df = load_and_prepare_data(llvm_samples_path / f"LLVM_{workload}.json")
    # df = load_and_prepare_data(gcc_samples_path / f"GCC_{workload}.json")
    # df = load_data(workload, algorithm, 65535)
    df = load_and_prepare_data(dbms_samples_path / f"DBMS_{workload}.json")
    # The original computed a min-max normalization here and then immediately
    # discarded it (df_normalized = df); the raw values are what gets plotted.
    points = df[objectives]

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_title(f"All samples for {workload}")
    ax.set_xlabel(objectives[0])
    ax.set_ylabel(objectives[1])

    ax.scatter(points[objectives[0]].values, points[objectives[1]].values, c='b', marker='o')

    # Save the plot as a file, then release the figure's memory.
    file_path = package_path / "demo" / "comparison" / "pngs" / f"{target}_{workload}.png"
    plt.savefig(file_path)
    plt.close(fig)

if __name__ == "__main__":
    # workloads = load_workloads()
 
    # workloads = [
    #     "cbench-consumer-tiff2bw",
    #     "cbench-security-rijndael",
        
    #     "cbench-security-pgp", 
    #     "polybench-cholesky",
    #     "cbench-consumer-tiff2rgba",
    #     "cbench-network-patricia",
    #     # "cbench-automotive-susan-e",
    #     # "polybench-symm",
    #     "cbench-consumer-mad",
    #     "polybench-lu"
    # ]
    
    # workloads = [
    #     "cbench-security-sha",
    #     "cbench-telecom-adpcm-c",
    #     ""
    # ]
    
    # LLVM
    # NOTE(review): workloads_improved is defined but unused on the active
    # code path below; kept for switching the plotted benchmark set.
    workloads_improved = [
        "cbench-telecom-gsm",
        "cbench-automotive-qsort1",
        "cbench-automotive-susan-e",
        "cbench-consumer-tiff2rgba",
        "cbench-network-patricia",
        "cbench-consumer-tiff2bw",
        "cbench-consumer-jpeg-d",
        "cbench-telecom-adpcm-c",
        "cbench-security-rijndael",
        "cbench-security-sha",
    ]
        
        
    # MySQL/DBMS workloads actually plotted below.
    workloads_mysql = [
        "sibench",
        "smallbank",
        "voter",
        "tatp",
        "tpcc",
        "twitter",
    ]
    # NOTE(review): seed is unused on the active path; plot_all_2d loads the
    # sample file rather than a per-seed run.
    seed = 65535  # Example seed
    
    # Plot sampling results
    for workload in workloads_mysql:
        # for algorithm in algorithm_list:
        plot_all_2d(workload)
        # plot_pareto_front(workload)
    
    # for algorithm in algorithm_list:
    #     # dynamic_plot_html("cbench-consumer-tiff2bw", algorithm, seed)
    #     for workload in workloads:
    #         dynamic_plot_html(workload, algorithm, seed)
            # dynamic_plot(workload, algorithm, seed)
        # save_individual_frames(workload, algorithm, objectives, seed)

================================================
FILE: demo/comparison/experiment_gcc.py
================================================
import sys
from pathlib import Path

current_dir = Path(__file__).resolve().parent
package_dir = current_dir.parent.parent
sys.path.insert(0, str(package_dir))

import argparse
import json
import os

import numpy as np
from csstuning.compiler.compiler_benchmark import CompilerBenchmarkBase

from transopt.benchmark import instantiate_problems
from transopt.KnowledgeBase.kb_builder import construct_knowledgebase
from transopt.KnowledgeBase.TransferDataHandler import OptTaskDataHandler
from optimizer.construct_optimizer import get_optimizer

# Pin the native math libraries (MKL, numexpr, OpenMP) to one thread each so
# parallel experiment processes do not oversubscribe the CPU.
# NOTE(review): numpy is imported above these lines; some libraries read these
# variables at import time, so they may not take effect here — confirm.
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"


def execute_tasks(tasks, args):
    """Wire up knowledge base, problem suite, optimizer and data handler, then optimize."""
    knowledge_base = construct_knowledgebase(args)
    problem_suite = instantiate_problems(tasks, args.seed)
    optimizer = get_optimizer(args)
    handler = OptTaskDataHandler(knowledge_base, args)
    optimizer.optimize(problem_suite, handler)


def split_into_segments(lst, n):
    """Split *lst* into *n* contiguous segments whose sizes differ by at most one.

    The first ``len(lst) % n`` segments receive one extra element each.
    """
    items = list(lst)
    base, extra = divmod(len(items), n)
    segments = []
    start = 0
    for i in range(n):
        end = start + base + (1 if i < extra else 0)
        segments.append(items[start:end])
        start = end
    return segments


def get_workloads(workloads, split_index, total_splits=10):
    """Return segment *split_index* of *workloads* split into *total_splits* parts.

    Raises:
    IndexError: when split_index is past the last segment.
    """
    segments = split_into_segments(workloads, total_splits)
    if split_index < len(segments):
        return segments[split_index]
    raise IndexError("split index out of range")


def load_features():
    """Load the per-workload GCC knob selection shipped with the demo."""
    features_path = package_dir / "demo" / "comparison" / "features_by_workload_gcc_extra.json"
    with features_path.open("r") as fh:
        return json.load(fh)


def configure_experiment(workload, features, seed, optimizer_name, exp_path, budget=20, init_number=10):
    """Build the (tasks, args) pair describing one GCC tuning experiment.

    Returns:
    tasks: problem specification keyed by benchmark name ("GCC").
    args: an argparse.Namespace carrying the optimizer configuration.
    """
    settings = dict(
        seed=seed,
        optimizer=optimizer_name,
        budget=budget,
        init_number=init_number,
        # Population size mirrors the number of initial samples.
        pop_size=init_number,
        init_method="random",
        exp_path=exp_path,
        exp_name=f"gcc_{workload}",
        verbose=True,
        normalize="norm",
        acquisition_func="LCB",
    )
    tasks = {
        "GCC": {
            "budget": budget,
            "workloads": [workload],
            # Tune only the pre-selected top knobs for this workload.
            "knobs": features[workload]["top"],
        },
    }
    return tasks, argparse.Namespace(**settings)

def main(optimizers=(), repeat=5, budget=500, init_number=21):
    """Run every optimizer on each GCC workload in this process's split.

    Parameters:
    optimizers: iterable of optimizer names to run (default: none). The
        original used a mutable default list; an empty tuple is equivalent
        and avoids the mutable-default pitfall.
    repeat: number of seeds per (optimizer, workload) pair; seeds start at 65535.
    budget: evaluation budget per run.
    init_number: number of initial (random) samples.
    """
    features = load_features()

    # --split_index selects which segment of the workload list this process runs.
    parser = argparse.ArgumentParser(description="Run optimization experiments")
    parser.add_argument("--split_index", type=int, default=0,
                        help="Index for splitting the workload segments")
    args = parser.parse_args()

    available_workloads = [
        "polybench-3mm",
        "cbench-automotive-susan-c",
        "cbench-consumer-tiff2dither",
        "cbench-automotive-bitcount",
        "polybench-2mm",
        "polybench-adi",
        "cbench-office-stringsearch2",
        "polybench-fdtd-2d",
        "polybench-atax",
        "polybench-doitgen",
        "polybench-durbin",
        "polybench-fdtd-apml",
        "polybench-gemver",
        "polybench-gesummv",      
    ]
    # available_workloads = features.keys()
    
    workloads = get_workloads(available_workloads, args.split_index)

    exp_path = package_dir / "experiment_results"

    for optimizer_name in optimizers:
        for workload in workloads:
            for i in range(repeat):
                # Consecutive seeds keep repeated runs reproducible.
                tasks, exp_args = configure_experiment(
                    workload,
                    features,
                    65535 + i,
                    optimizer_name,
                    exp_path,
                    budget,
                    init_number,
                )
                execute_tasks(tasks, exp_args)


def main_debug(repeat=1, budget=20, init_number=10):
    """Small-scale debug entry point: one optimizer on one hard-coded workload.

    Mirrors main() but with a single workload and optimizer so one
    configuration can be exercised quickly.
    """
    features = load_features()

    parser = argparse.ArgumentParser(description="Run optimization experiments")
    parser.add_argument("--split_index", type=int, default=9,
                        help="Index for splitting the workload segments")
    args = parser.parse_args()

    # NOTE(review): this value is computed and then immediately overridden by
    # the hard-coded list below; kept for easy switching while debugging.
    workloads = get_workloads(features.keys(), args.split_index)[:1]

    workloads = ["cbench-consumer-jpeg-d"]
    exp_path = package_dir / "experiment_results"

    for optimizer_name in ["MoeadEGO"]:
        for workload in workloads:
            for i in range(repeat):
                # Consecutive seeds (65535, 65536, ...) keep repeats reproducible.
                tasks, exp_args = configure_experiment(
                    workload,
                    features,
                    65535 + i,
                    optimizer_name,
                    exp_path,
                    budget,
                    init_number,
                )
                execute_tasks(tasks, exp_args)


if __name__ == "__main__":
    # Flip to True to run the small single-workload debug configuration.
    # (The original assigned True and immediately overwrote it with False —
    # a dead assignment removed here.)
    debug = False
    if debug:
        main_debug(repeat=5, budget=500, init_number=10)
    else:
        main(["ParEGO", "SMSEGO", "MoeadEGO", "CauMO"], repeat=5, budget=500, init_number=21)


================================================
FILE: demo/comparison/experiment_llvm.py
================================================
import sys
from pathlib import Path

current_dir = Path(__file__).resolve().parent
package_dir = current_dir.parent.parent
sys.path.insert(0, str(package_dir))

import argparse
import json
import os

import numpy as np
from csstuning.compiler.compiler_benchmark import CompilerBenchmarkBase

from transopt.benchmark import instantiate_problems
from transopt.KnowledgeBase.kb_builder import construct_knowledgebase
from transopt.KnowledgeBase.TransferDataHandler import OptTaskDataHandler
from optimizer.construct_optimizer import get_optimizer

# Pin the native math libraries (MKL, numexpr, OpenMP) to one thread each so
# parallel experiment processes do not oversubscribe the CPU.
# NOTE(review): numpy is imported above these lines; some libraries read these
# variables at import time, so they may not take effect here — confirm.
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"


def execute_tasks(tasks, args):
    """Assemble knowledge base, problems, optimizer and data handler, then run the optimization."""
    knowledge_base = construct_knowledgebase(args)
    problems = instantiate_problems(tasks, args.seed)
    chosen_optimizer = get_optimizer(args)
    handler = OptTaskDataHandler(knowledge_base, args)
    chosen_optimizer.optimize(problems, handler)


def split_into_segments(lst, n):
    """Partition *lst* into *n* contiguous chunks whose sizes differ by at most one.

    The first ``len(lst) % n`` chunks get one extra element each.
    """
    items = list(lst)
    quotient, remainder = divmod(len(items), n)
    # Cumulative segment boundaries: each segment spans bounds[i]..bounds[i+1].
    bounds = [0]
    for i in range(n):
        bounds.append(bounds[-1] + quotient + (1 if i < remainder else 0))
    return [items[bounds[i]:bounds[i + 1]] for i in range(n)]


def get_workloads(workloads, split_index, total_splits=10):
    """Return the segment of *workloads* assigned to *split_index*.

    The workloads are partitioned into *total_splits* contiguous segments
    of near-equal size (earlier segments take the extra elements).

    Raises:
        IndexError: if *split_index* is not a valid segment index.
    """
    items = list(workloads)
    base, remainder = divmod(len(items), total_splits)
    segments = [
        items[i * base + min(i, remainder):(i + 1) * base + min(i + 1, remainder)]
        for i in range(total_splits)
    ]
    if split_index >= len(segments):
        raise IndexError("split index out of range")

    return segments[split_index]


def load_features(file_path):
    """Parse the per-workload feature mapping stored as JSON at *file_path*."""
    with open(file_path, "r") as handle:
        return json.load(handle)


def configure_experiment(workload, features, seed, optimizer_name, exp_path, budget=20, init_number=10):
    """Assemble the task spec and argument namespace for one LLVM tuning run.

    Args:
        workload: benchmark workload name; also used to derive the
            experiment name ``llvm_<workload>``.
        features: mapping of workload -> {"top": [...]} knob lists.
        seed: RNG seed for this run.
        optimizer_name: name understood by the optimizer factory.
        exp_path: directory where results are written.
        budget: evaluation budget for the run.
        init_number: number of initial design points.

    Returns:
        A ``(tasks, args)`` pair: the task dictionary keyed by "LLVM" and
        an ``argparse.Namespace`` with the run configuration.
    """
    experiment_args = argparse.Namespace(
        seed=seed,
        optimizer=optimizer_name,
        budget=budget,
        init_number=init_number,
        init_method="random",
        exp_path=exp_path,
        exp_name=f"llvm_{workload}",
        verbose=True,
        normalize="norm",
        acquisition_func="LCB",
    )
    task_spec = {
        "LLVM": {
            "budget": budget,
            "workloads": [workload],
            # Tune only the workload's pre-selected top knobs.
            "knobs": features[workload]["top"],
        },
    }
    return task_spec, experiment_args

def main(optimizers=(), repeat=5, budget=500, init_number=21):
    """Run every optimizer over this process's workload segment.

    Fixes a mutable-default-argument smell: the previous signature used
    ``optimizers = []``; an immutable empty tuple is the safe equivalent.

    Args:
        optimizers: iterable of optimizer names accepted by the optimizer
            factory (e.g. "ParEGO", "MoeadEGO").
        repeat: repetitions per (optimizer, workload) pair; repetition *i*
            uses seed ``65535 + i`` so runs are reproducible.
        budget: evaluation budget per run.
        init_number: number of initial samples per run.

    The workload segment is chosen by the ``--split_index`` command-line
    argument, allowing independent processes to share the workload list.
    """
    features_file = package_dir / "demo" / "comparison" / "features_by_workload_llvm.json"
    features = load_features(features_file)

    parser = argparse.ArgumentParser(description="Run optimization experiments")
    parser.add_argument("--split_index", type=int, default=0,
                        help="Index for splitting the workload segments")
    cli_args = parser.parse_args()

    workloads = get_workloads(features.keys(), cli_args.split_index)

    exp_path = Path.cwd() / "experiment_results"

    for optimizer_name in optimizers:
        for workload in workloads:
            for i in range(repeat):
                tasks, exp_args = configure_experiment(
                    workload,
                    features,
                    65535 + i,
                    optimizer_name,
                    exp_path,
                    budget,
                    init_number,
                )
                execute_tasks(tasks, exp_args)


def main_debug(repeat=1, budget=20, init_number=10):
    """Smoke-test entry point: run only MoeadEGO on a single workload.

    Mirrors :func:`main` but restricts the workload segment (selected via
    ``--split_index``) to its first element, for quick debugging runs.
    """
    features_file = package_dir / "demo" / "comparison" / "features_by_workload_llvm.json"
    features = load_features(features_file)

    parser = argparse.ArgumentParser(description="Run optimization experiments")
    parser.add_argument("--split_index", type=int, default=0,
                        help="Index for splitting the workload segments")
    cli_args = parser.parse_args()

    # Keep only the first workload of the chosen segment.
    workloads = get_workloads(features.keys(), cli_args.split_index)[:1]

    exp_path = Path.cwd() / "experiment_results"

    for optimizer_name in ["MoeadEGO"]:
        for workload in workloads:
            for run_index in range(repeat):
                tasks, exp_args = configure_experiment(
                    workload,
                    features,
                    65535 + run_index,
                    optimizer_name,
                    exp_path,
                    budget,
                    init_number,
                )
                execute_tasks(tasks, exp_args)


if __name__ == "__main__":
    # Set to True to run the single-workload smoke test instead of the
    # full optimizer sweep.
    debug = False
    if debug:
        main_debug(repeat=1, budget=20, init_number=11)
    else:
        main(["ParEGO", "MoeadEGO", "SMSEGO", "CauMO"], repeat=5, budget=500, init_number=21)


================================================
FILE: demo/comparison/features_by_workload_gcc.json
================================================
{
    "cbench-consumer-tiff2bw": {
        "common": [
            "align-jumps",
            "align-labels",
            "guess-branch-probability",
            "inline-functions",
            "align-loops",
            "align-functions",
            "gcse"
        ],
        "top": [
            "align-jumps",
            "align-labels",
            "guess-branch-probability",
            "inline-functions",
            "align-loops",
            "align-functions",
            "gcse",
            "tree-ch",
            "tree-loop-vectorize",
            "vect-cost-model",
            "tree-vrp",
            "tree-pre",
            "schedule-insns2",
            "tree-dominator-opts",
            "inline-small-functions",
            "expensive-optimizations",
            "tree-ter",
            "code-hoisting",
            "ipa-cp",
            "forward-propagate"
        ]
    },
    "cbench-security-rijndael": {
        "common": [
            "align-jumps",
            "align-loops",
            "align-labels",
            "align-functions"
        ],
        "top": [
            "align-jumps",
            "align-loops",
            "align-labels",
            "align-functions",
            "expensive-optimizations",
            "gcse",
            "schedule-insns2",
            "tree-ter",
            "guess-branch-probability",
            "tree-pre",
            "code-hoisting",
            "tree-vrp",
            "tree-sra",
            "dse",
            "tree-dominator-opts",
            "peel-loops",
            "if-conversion",
            "tree-fre",
            "rerun-cse-after-loop",
            "omit-frame-pointer"
        ]
    },
    "cbench-security-pgp": {
        "common": [
            "align-jumps",
            "align-loops",
            "align-labels",
            "align-functions"
        ],
        "top": [
            "align-jumps",
            "align-loops",
            "align-labels",
            "align-functions",
            "inline-functions",
            "inline-small-functions",
            "gcse",
            "schedule-insns2",
            "tree-vrp",
            "tree-dominator-opts",
            "tree-ccp",
            "guess-branch-probability",
            "expensive-optimizations",
            "tree-ch",
            "peel-loops",
            "tree-partial-pre",
            "tree-loop-vectorize",
            "code-hoisting",
            "dse",
            "caller-saves"
        ]
    },
    "polybench-cholesky": {
        "common": [
            "align-jumps",
            "align-loops",
            "align-labels",
            "align-functions"
        ],
        "top": [
            "align-jumps",
            "align-loops",
            "align-labels",
            "align-functions",
            "peel-loops",
            "tree-ch",
            "guess-branch-probability",
            "tree-loop-vectorize",
            "reorder-blocks-algorithm",
            "ipa-cp",
            "inline-small-functions",
            "unswitch-loops",
            "math-errno",
            "inline-functions-called-once",
            "optimize-strlen",
            "tree-vrp",
            "partial-inlining",
            "reorder-blocks-and-partition",
            "ipa-icf-functions",
            "associative-math"
        ]
    },
    "cbench-telecom-crc32": {
        "common": [
            "align-jumps",
            "inline-small-functions",
            "align-labels",
            "inline-functions",
            "align-loops",
            "align-functions",
            "tree-ch"
        ],
        "top": [
            "align-jumps",
            "inline-small-functions",
            "align-labels",
            "inline-functions",
            "align-loops",
            "align-functions",
            "tree-ch",
            "guess-branch-probability",
            "omit-frame-pointer",
            "schedule-insns2",
            "expensive-optimizations",
            "tree-vrp",
            "caller-saves",
            "gcse",
            "tree-dominator-opts",
            "cx-limited-range ",
            "compare-elim",
            "tree-pre",
            "split-loops",
            "reorder-functions"
        ]
    },
    "polybench-fdtd-apml": {
        "common": [
            "align-jumps",
            "tree-ccp",
            "align-labels",
            "align-loops",
            "align-functions",
            "tree-ch"
        ],
        "top": [
            "align-jumps",
            "tree-ccp",
            "align-labels",
            "align-loops",
            "align-functions",
            "tree-ch",
            "unsafe-math-optimizations",
            "tree-pre",
            "tree-fre",
            "gcse",
            "guess-branch-probability",
            "inline-functions-called-once",
            "omit-frame-pointer",
            "code-hoisting",
            "tree-dominator-opts",
            "tree-vrp",
            "tree-loop-vectorize",
            "gcse-after-reload",
            "move-loop-invariants",
            "hoist-adjacent-loads"
        ]
    },
    "cbench-network-patricia": {
        "common": [
            "align-jumps",
            "align-labels",
            "guess-branch-probability",
            "align-loops",
            "align-functions",
            "split-loops",
            "vect-cost-model"
        ],
        "top": [
            "align-jumps",
            "align-labels",
            "guess-branch-probability",
            "align-loops",
            "align-functions",
            "split-loops",
            "vect-cost-model",
            "inline-functions",
            "inline-small-functions",
            "optimize-strlen",
            "tree-vrp",
            "gcse",
            "schedule-insns2",
            "tree-copy-prop",
            "reorder-blocks",
            "tree-dominator-opts",
            "reorder-blocks-and-partition",
            "tree-pta",
            "tree-ch",
            "if-conversion"
        ]
    },
    "cbench-consumer-tiff2rgba": {
        "common": [
            "align-jumps",
            "align-labels",
            "guess-branch-probability",
            "align-loops",
            "align-functions"
        ],
        "top": [
            "align-jumps",
            "align-labels",
            "guess-branch-probability",
            "align-loops",
            "align-functions",
            "tree-ch",
            "tree-loop-vectorize",
            "vect-cost-model",
            "tree-pre",
            "schedule-insns2",
            "tree-vrp",
            "gcse",
            "tree-dominator-opts",
            "inline-small-functions",
            "tree-ter",
            "inline-functions",
            "expensive-optimizations",
            "tree-pta",
            "omit-frame-pointer",
            "code-hoisting"
        ]
    },
    "polybench-symm": {
        "common": [
            "align-jumps",
            "align-labels",
            "align-loops",
            "align-functions",
            "tree-ch",
            "peel-loops"
        ],
        "top": [
            "align-jumps",
            "align-labels",
            "align-loops",
            "align-functions",
            "tree-ch",
            "peel-loops",
            "tree-dominator-opts",
            "tree-vrp",
            "schedule-insns2",
            "gcse",
            "guess-branch-probability",
            "inline-functions-called-once",
            "inline-functions",
            "inline-small-functions",
            "expensive-optimizations",
            "vect-cost-model",
            "tree-fre",
            "ipa-cp",
            "ssa-phiopt",
            "tree-copy-prop"
        ]
    },
    "cbench-automotive-susan-e": {
        "common": [
            "align-jumps",
            "align-labels",
            "guess-branch-probability",
            "align-loops",
            "align-functions"
        ],
        "top": [
            "align-jumps",
            "align-labels",
            "guess-branch-probability",
            "align-loops",
            "align-functions",
            "cprop-registers",
            "tree-vrp",
            "tree-ch",
            "schedule-insns2",
            "gcse",
            "tree-ter",
            "code-hoisting",
            "math-errno",
            "tree-pre",
            "expensive-optimizations",
            "move-loop-invariants",
            "tree-dominator-opts",
            "caller-saves",
            "unswitch-loops",
            "dse"
        ]
    },
    "cbench-telecom-adpcm-d": {
        "common": [
            "align-jumps",
            "align-loops",
            "align-labels",
            "align-functions"
        ],
        "top": [
            "align-jumps",
            "align-loops",
            "align-labels",
            "align-functions",
            "if-conversion",
            "ssa-phiopt",
            "guess-branch-probability",
            "dce",
            "schedule-insns2",
            "tree-switch-conversion",
            "tree-builtin-call-dce",
            "tree-dominator-opts",
            "peel-loops",
            "predictive-commoning",
            "vect-cost-model",
            "tree-loop-vectorize",
            "shrink-wrap",
            "code-hoisting",
            "math-errno",
            "ipa-reference"
        ]
    },
    "polybench-ludcmp": {
        "common": [
            "align-jumps",
            "tree-ccp",
            "tree-loop-vectorize",
            "inline-functions-called-once",
            "align-labels",
            "guess-branch-probability",
            "align-loops",
            "align-functions",
            "tree-ch",
            "peel-loops"
        ],
        "top": [
            "align-jumps",
            "tree-ccp",
            "tree-loop-vectorize",
            "inline-functions-called-once",
            "align-labels",
            "guess-branch-probability",
            "align-loops",
            "align-functions",
            "tree-ch",
            "peel-loops",
            "tree-vrp",
            "schedule-insns2",
            "tree-dominator-opts",
            "inline-small-functions",
            "reorder-blocks-algorithm",
            "ipa-cp",
            "gcse",
            "reorder-blocks-and-partition",
            "tree-pre",
            "tree-dce"
        ]
    },
    "polybench-lu": {
        "common": [
            "align-jumps",
            "tree-loop-vectorize",
            "align-labels",
            "guess-branch-probability",
            "align-loops",
            "align-functions",
            "peel-loops"
        ],
        "top": [
            "align-jumps",
            "tree-loop-vectorize",
            "align-labels",
            "guess-branch-probability",
            "align-loops",
            "align-functions",
            "peel-loops",
            "tree-ch",
            "tree-vrp",
            "schedule-insns2",
            "gcse",
            "tree-fre",
            "inline-small-functions",
            "ipa-cp",
            "inline-functions-called-once",
            "tree-dominator-opts",
            "reorder-blocks-algorithm",
            "tree-pre",
            "reorder-blocks",
            "code-hoisting"
        ]
    },
    "cbench-consumer-mad": {
        "common": [
            "align-jumps",
            "align-labels",
            "tree-vrp",
            "align-loops",
            "tree-pre",
            "align-functions",
            "tree-pta",
            "vect-cost-model"
        ],
        "top": [
            "align-jumps",
            "align-labels",
            "tree-vrp",
            "align-loops",
            "tree-pre",
            "align-functions",
            "tree-pta",
            "vect-cost-model",
            "guess-branch-probability",
            "if-conversion",
            "optimize-sibling-calls",
            "tree-slsr",
            "shrink-wrap",
            "reorder-blocks-and-partition",
            "crossjumping",
            "version-loops-for-strides",
            "ipa-icf",
            "compare-elim",
            "lra-remat",
            "ipa-sra"
        ]
    },
    "cbench-automotive-qsort1": {
        "common": [
            "align-jumps",
            "tree-loop-vectorize",
            "inline-small-functions",
            "align-labels",
            "guess-branch-probability",
            "align-loops",
            "align-functions"
        ],
        "top": [
            "align-jumps",
            "tree-loop-vectorize",
            "inline-small-functions",
            "align-labels",
            "guess-branch-probability",
            "align-loops",
            "align-functions",
            "tree-ch",
            "tree-dominator-opts",
            "peel-loops",
            "schedule-insns2",
            "tree-vrp",
            "inline-functions",
            "partial-inlining",
            "gcse",
            "ssa-phiopt",
            "inline-functions-called-once",
            "vect-cost-model",
            "move-loop-invariants",
            "tree-tail-merge"
        ]
    },
    "polybench-bicg": {
        "common": [
            "align-jumps",
            "align-labels",
            "align-loops",
            "vect-cost-model",
            "align-functions",
            "peel-loops"
        ],
        "top": [
            "align-jumps",
            "align-labels",
            "align-loops",
            "vect-cost-model",
            "align-functions",
            "peel-loops",
            "inline-small-functions",
            "ipa-cp",
            "guess-branch-probability",
            "tree-tail-merge",
            "optimize-strlen",
            "inline-functions-called-once",
            "tree-ch",
            "tree-vrp",
            "tree-coalesce-vars",
            "tree-loop-distribute-patterns",
            "optimize-sibling-calls",
            "forward-propagate",
            "omit-frame-pointer",
            "tree-ter"
        ]
    },
    "cbench-security-sha": {
        "common": [
            "align-jumps",
            "align-labels",
            "align-loops",
            "align-functions",
            "tree-ch"
        ],
        "top": [
            "align-jumps",
            "align-labels",
            "align-loops",
            "align-functions",
            "tree-ch",
            "tree-loop-vectorize",
            "tree-vrp",
            "tree-dominator-opts",
            "schedule-insns2",
            "guess-branch-probability",
            "ipa-ra",
            "gcse",
            "ipa-sra",
            "tree-pre",
            "predictive-commoning",
            "expensive-optimizations",
            "tree-slp-vectorize",
            "reciprocal-math",
            "vect-cost-model",
            "inline-small-functions"
        ]
    },
    "cbench-consumer-jpeg-d": {
        "common": [
            "align-jumps",
            "align-loops",
            "align-labels",
            "align-functions"
        ],
        "top": [
            "align-jumps",
            "align-loops",
            "align-labels",
            "align-functions",
            "math-errno",
            "inline-small-functions",
            "gcse-after-reload",
            "guess-branch-probability",
            "lra-remat",
            "tree-slsr",
            "thread-jumps",
            "tree-sra",
            "combine-stack-adjustments",
            "forward-propagate",
            "version-loops-for-strides",
            "cx-limited-range ",
            "merge-constants",
            "associative-math",
            "tree-loop-vectorize",
            "reorder-blocks"
        ]
    },
    "cbench-telecom-adpcm-c": {
        "common": [
            "align-jumps",
            "align-labels",
            "align-loops",
            "align-functions",
            "vect-cost-model"
        ],
        "top": [
            "align-jumps",
            "align-labels",
            "align-loops",
            "align-functions",
            "vect-cost-model",
            "if-conversion2",
            "ssa-phiopt",
            "guess-branch-probability",
            "if-conversion",
            "move-loop-invariants",
            "inline-small-functions",
            "isolate-erroneous-paths-dereference",
            "defer-pop",
            "cprop-registers",
            "omit-frame-pointer",
            "ipa-cp",
            "dce",
            "signed-zeros",
            "ipa-sra",
            "tree-builtin-call-dce"
        ]
    },
    "cbench-telecom-gsm": {
        "common": [
            "align-jumps",
            "align-labels",
            "align-loops",
            "align-functions",
            "peel-loops"
        ],
        "top": [
            "align-jumps",
            "align-labels",
            "align-loops",
            "align-functions",
            "peel-loops",
            "tree-loop-vectorize",
            "predictive-commoning",
            "tree-dominator-opts",
            "tree-ch",
            "tree-vrp",
            "tree-pre",
            "guess-branch-probability",
            "ssa-phiopt",
            "if-conversion",
            "math-errno",
            "optimize-strlen",
            "unswitch-loops",
            "inline-functions-called-once",
            "caller-saves",
            "merge-constants"
        ]
    }
}

================================================
FILE: demo/comparison/features_by_workload_gcc_extra.json
================================================
{
    "cbench-automotive-bitcount": {
        "common": [
            "align-labels",
            "tree-ter",
            "align-functions",
            "align-loops",
            "align-jumps"
        ],
        "top": [
            "align-labels",
            "tree-ter",
            "align-functions",
            "align-loops",
            "align-jumps",
            "tree-ch",
            "optimize-sibling-calls",
            "guess-branch-probability",
            "peephole2",
            "reorder-blocks-algorithm",
            "reorder-blocks",
            "reorder-blocks-and-partition",
            "gcse",
            "tree-vrp",
            "expensive-optimizations",
            "tree-dce",
            "schedule-insns2",
            "tree-fre",
            "split-loops",
            "omit-frame-pointer"
        ]
    },
    "cbench-automotive-susan-c": {
        "common": [
            "align-labels",
            "guess-branch-probability",
            "align-functions",
            "align-loops",
            "align-jumps"
        ],
        "top": [
            "align-labels",
            "guess-branch-probability",
            "align-functions",
            "align-loops",
            "align-jumps",
            "cprop-registers",
            "tree-vrp",
            "schedule-insns2",
            "tree-ch",
            "gcse",
            "tree-dominator-opts",
            "tree-pre",
            "expensive-optimizations",
            "reorder-blocks-algorithm",
            "tree-ter",
            "code-hoisting",
            "tree-fre",
            "predictive-commoning",
            "reorder-blocks-and-partition",
            "move-loop-invariants"
        ]
    },
    "cbench-consumer-tiff2dither": {
        "common": [
            "align-labels",
            "align-functions",
            "align-loops",
            "align-jumps"
        ],
        "top": [
            "align-labels",
            "align-functions",
            "align-loops",
            "align-jumps",
            "reorder-blocks-algorithm",
            "vect-cost-model",
            "inline-functions-called-once",
            "hoist-adjacent-loads",
            "guess-branch-probability",
            "inline-functions",
            "ipa-ra",
            "reciprocal-math",
            "tree-ccp",
            "ipa-sra",
            "optimize-strlen",
            "split-paths",
            "reorder-functions",
            "caller-saves",
            "tree-builtin-call-dce",
            "tree-vrp"
        ]
    },
    "cbench-office-stringsearch2": {
        "common": [
            "align-labels",
            "guess-branch-probability",
            "inline-functions",
            "align-functions",
            "align-loops",
            "align-jumps",
            "inline-small-functions"
        ],
        "top": [
            "align-labels",
            "guess-branch-probability",
            "inline-functions",
            "align-functions",
            "align-loops",
            "align-jumps",
            "inline-small-functions",
            "ipa-pure-const",
            "tree-dominator-opts",
            "tree-pre",
            "schedule-insns2",
            "tree-vrp",
            "gcse",
            "tree-ch",
            "partial-inlining",
            "expensive-optimizations",
            "tree-ccp",
            "tree-fre",
            "dse",
            "reorder-blocks-algorithm"
        ]
    },
    "polybench-2mm": {
        "common": [
            "align-labels",
            "align-loops",
            "peel-loops",
            "ipa-cp",
            "align-jumps",
            "tree-ch"
        ],
        "top": [
            "align-labels",
            "align-loops",
            "peel-loops",
            "ipa-cp",
            "align-jumps",
            "tree-ch",
            "tree-vrp",
            "schedule-insns2",
            "align-functions",
            "tree-dominator-opts",
            "predictive-commoning",
            "inline-functions-called-once",
            "gcse",
            "tree-pre",
            "guess-branch-probability",
            "inline-small-functions",
            "tree-fre",
            "tree-partial-pre",
            "tree-ccp",
            "tree-loop-vectorize"
        ]
    },
    "polybench-3mm": {
        "common": [
            "align-labels",
            "tree-dominator-opts",
            "align-functions",
            "align-loops",
            "peel-loops",
            "tree-vrp",
            "align-jumps",
            "tree-ch"
        ],
        "top": [
            "align-labels",
            "tree-dominator-opts",
            "align-functions",
            "align-loops",
            "peel-loops",
            "tree-vrp",
            "align-jumps",
            "tree-ch",
            "ipa-cp",
            "schedule-insns2",
            "predictive-commoning",
            "inline-functions-called-once",
            "tree-pre",
            "inline-small-functions",
            "gcse",
            "guess-branch-probability",
            "tree-fre",
            "tree-partial-pre",
            "tree-ccp",
            "tree-dce"
        ]
    },
    "polybench-adi": {
        "common": [
            "align-labels",
            "guess-branch-probability",
            "tree-dominator-opts",
            "inline-functions",
            "align-loops",
            "peel-loops",
            "tree-loop-vectorize",
            "ipa-cp",
            "align-jumps",
            "tree-ch",
            "inline-small-functions"
        ],
        "top": [
            "align-labels",
            "guess-branch-probability",
            "tree-dominator-opts",
            "inline-functions",
            "align-loops",
            "peel-loops",
            "tree-loop-vectorize",
            "ipa-cp",
            "align-jumps",
            "tree-ch",
            "inline-small-functions",
            "ipa-cp-clone",
            "tree-vrp",
            "tree-fre",
            "align-functions",
            "tree-ccp",
            "code-hoisting",
            "schedule-insns2",
            "tree-pre",
            "gcse"
        ]
    },
    "polybench-atax": {
        "common": [
            "align-labels",
            "align-functions",
            "align-loops",
            "peel-loops",
            "align-jumps",
            "tree-ch",
            "inline-small-functions"
        ],
        "top": [
            "align-labels",
            "align-functions",
            "align-loops",
            "peel-loops",
            "align-jumps",
            "tree-ch",
            "inline-small-functions",
            "tree-loop-vectorize",
            "tree-pre",
            "schedule-insns2",
            "tree-vrp",
            "tree-dominator-opts",
            "ipa-cp",
            "gcse",
            "predictive-commoning",
            "inline-functions-called-once",
            "guess-branch-probability",
            "tree-partial-pre",
            "optimize-strlen",
            "tree-fre"
        ]
    },
    "polybench-doitgen": {
        "common": [
            "align-labels",
            "align-functions",
            "align-loops",
            "align-jumps"
        ],
        "top": [
            "align-labels",
            "align-functions",
            "align-loops",
            "align-jumps",
            "peel-loops",
            "tree-pre",
            "tree-dominator-opts",
            "predictive-commoning",
            "gcse",
            "guess-branch-probability",
            "tree-vrp",
            "tree-fre",
            "tree-ch",
            "tree-partial-pre",
            "vect-cost-model",
            "code-hoisting",
            "inline-functions-called-once",
            "inline-small-functions",
            "tree-builtin-call-dce",
            "tree-dce"
        ]
    },
    "polybench-durbin": {
        "common": [
            "align-labels",
            "align-functions",
            "align-loops",
            "peel-loops",
            "align-jumps"
        ],
        "top": [
            "align-labels",
            "align-functions",
            "align-loops",
            "peel-loops",
            "align-jumps",
            "predictive-commoning",
            "tree-vrp",
            "tree-ch",
            "tree-loop-vectorize",
            "tree-pre",
            "guess-branch-probability",
            "ipa-cp",
            "inline-small-functions",
            "gcse",
            "schedule-insns2",
            "inline-functions-called-once",
            "reciprocal-math",
            "indirect-inlining",
            "devirtualize",
            "auto-inc-dec"
        ]
    },
    "polybench-fdtd-2d": {
        "common": [
            "align-labels",
            "guess-branch-probability",
            "align-functions",
            "align-loops",
            "peel-loops",
            "tree-loop-vectorize",
            "align-jumps"
        ],
        "top": [
            "align-labels",
            "guess-branch-probability",
            "align-functions",
            "align-loops",
            "peel-loops",
            "tree-loop-vectorize",
            "align-jumps",
            "ipa-cp",
            "tree-ch",
            "tree-vrp",
            "inline-functions-called-once",
            "tree-dominator-opts",
            "inline-small-functions",
            "schedule-insns2",
            "tree-fre",
            "gcse",
            "code-hoisting",
            "tree-pre",
            "tree-ccp",
            "ipa-icf-variables"
        ]
    },
    "polybench-fdtd-apml": {
        "common": [
            "align-labels",
            "align-functions",
            "align-loops",
            "align-jumps",
            "tree-ch"
        ],
        "top": [
            "align-labels",
            "align-functions",
            "align-loops",
            "align-jumps",
            "tree-ch",
            "unsafe-math-optimizations",
            "tree-pre",
            "tree-fre",
            "gcse",
            "guess-branch-probability",
            "inline-functions-called-once",
            "code-hoisting",
            "tree-dominator-opts",
            "omit-frame-pointer",
            "tree-loop-vectorize",
            "move-loop-invariants",
            "peephole2",
            "inline-small-functions",
            "ipa-cp",
            "store-merging"
        ]
    },
    "polybench-gemver": {
        "common": [
            "align-labels",
            "align-functions",
            "align-loops",
            "peel-loops",
            "tree-loop-vectorize",
            "align-jumps"
        ],
        "top": [
            "align-labels",
            "align-functions",
            "align-loops",
            "peel-loops",
            "tree-loop-vectorize",
            "align-jumps",
            "tree-pre",
            "inline-small-functions",
            "tree-vrp",
            "tree-dominator-opts",
            "ipa-cp",
            "guess-branch-probability",
            "tree-ch",
            "predictive-commoning",
            "inline-functions-called-once",
            "gcse",
            "tree-fre",
            "dse",
            "partial-inlining",
            "combine-stack-adjustments"
        ]
    },
    "polybench-gesummv": {
        "common": [
            "align-labels",
            "align-functions",
            "align-loops",
            "peel-loops",
            "align-jumps"
        ],
        "top": [
            "align-labels",
            "align-functions",
            "align-loops",
            "peel-loops",
            "align-jumps",
            "unsafe-math-optimizations",
            "guess-branch-probability",
            "inline-small-functions",
            "schedule-insns2",
            "tree-dominator-opts",
            "tree-vrp",
            "gcse",
            "inline-functions-called-once",
            "tree-ch",
            "ipa-cp",
            "vect-cost-model",
            "dce",
            "ipa-icf",
            "gcse-after-reload",
            "tree-ter"
        ]
    }
}

================================================
FILE: demo/comparison/features_by_workload_llvm.json
================================================
{
    "cbench-telecom-gsm": {
        "common": [
            "early-cse",
            "gvn",
            "instcombine",
            "jump-threading"
        ],
        "top": [
            "early-cse",
            "gvn",
            "instcombine",
            "jump-threading",
            "sroa",
            "mem2reg",
            "licm",
            "inject-tli-mappings",
            "early-cse-memssa",
            "loop-unroll",
            "loop-vectorize",
            "transform-warning",
            "libcalls-shrinkwrap",
            "adce",
            "indvars",
            "loop-sink",
            "callsite-splitting",
            "globalopt",
            "loop-rotate",
            "speculative-execution"
        ]
    },
    "cbench-automotive-qsort1": {
        "common": [
            "instcombine",
            "block-freq"
        ],
        "top": [
            "instcombine",
            "block-freq",
            "globalopt",
            "ipsccp",
            "gvn",
            "licm",
            "sroa",
            "loop-rotate",
            "mem2reg",
            "indvars",
            "loop-vectorize",
            "function-attrs",
            "loop-unroll",
            "early-cse-memssa",
            "sccp",
            "lazy-block-freq",
            "always-inline",
            "strip-dead-prototypes",
            "bdce",
            "domtree"
        ]
    },
    "cbench-automotive-susan-e": {
        "common": [
            "loop-rotate",
            "gvn",
            "early-cse-memssa",
            "instcombine",
            "loop-unroll",
            "early-cse",
            "sroa",
            "licm",
            "mem2reg"
        ],
        "top": [
            "loop-rotate",
            "gvn",
            "early-cse-memssa",
            "instcombine",
            "loop-unroll",
            "early-cse",
            "sroa",
            "licm",
            "mem2reg",
            "slp-vectorizer",
            "simplifycfg",
            "loop-vectorize",
            "tbaa",
            "tailcallelim",
            "function-attrs",
            "instsimplify",
            "reassociate",
            "always-inline",
            "float2int",
            "dse"
        ]
    },
    "cbench-consumer-tiff2rgba": {
        "common": [
            "loop-rotate",
            "gvn",
            "early-cse-memssa",
            "instcombine",
            "sroa",
            "licm",
            "mem2reg"
        ],
        "top": [
            "loop-rotate",
            "gvn",
            "early-cse-memssa",
            "instcombine",
            "sroa",
            "licm",
            "mem2reg",
            "slp-vectorizer",
            "loop-vectorize",
            "loop-unroll",
            "early-cse",
            "indvars",
            "dse",
            "globalopt",
            "jump-threading",
            "loop-distribute",
            "memoryssa",
            "loop-accesses",
            "prune-eh",
            "aggressive-instcombine"
        ]
    },
    "cbench-network-patricia": {
        "common": [
            "instcombine"
        ],
        "top": [
            "instcombine",
            "ipsccp",
            "aggressive-instcombine",
            "gvn",
            "globalopt",
            "loop-vectorize",
            "licm",
            "sroa",
            "mem2reg",
            "simplifycfg",
            "loop-rotate",
            "function-attrs",
            "jump-threading",
            "called-value-propagation",
            "early-cse-memssa",
            "dse",
            "indvars",
            "postdomtree",
            "inject-tli-mappings",
            "adce"
        ]
    },
    "cbench-automotive-bitcount": {
        "common": [
            "loop-rotate",
            "gvn",
            "licm"
        ],
        "top": [
            "loop-rotate",
            "gvn",
            "licm",
            "globalopt",
            "mem2reg",
            "jump-threading",
            "sroa",
            "instcombine",
            "simplifycfg",
            "speculative-execution",
            "indvars",
            "loop-unroll",
            "scoped-noalias-aa",
            "early-cse-memssa",
            "adce",
            "ipsccp",
            "lazy-branch-prob",
            "slp-vectorizer",
            "postdomtree",
            "dse"
        ]
    },
    "cbench-bzip2": {
        "common": [
            "loop-rotate",
            "gvn",
            "early-cse-memssa",
            "instcombine",
            "loop-unroll",
            "sroa",
            "licm",
            "mem2reg"
        ],
        "top": [
            "loop-rotate",
            "gvn",
            "early-cse-memssa",
            "instcombine",
            "loop-unroll",
            "sroa",
            "licm",
            "mem2reg",
            "slp-vectorizer",
            "loop-vectorize",
            "early-cse",
            "indvars",
            "jump-threading",
            "dse",
            "loop-accesses",
            "loop-instsimplify",
            "scoped-noalias-aa",
            "lazy-block-freq",
            "memcpyopt",
            "always-inline"
        ]
    },
    "cbench-consumer-tiff2bw": {
        "common": [
            "loop-rotate",
            "gvn",
            "instcombine",
            "sroa",
            "licm",
            "jump-threading",
            "mem2reg"
        ],
        "top": [
            "loop-rotate",
            "gvn",
            "instcombine",
            "sroa",
            "licm",
            "jump-threading",
            "mem2reg",
            "slp-vectorizer",
            "loop-vectorize",
            "early-cse-memssa",
            "loop-unroll",
            "alignment-from-assumptions",
            "function-attrs",
            "correlated-propagation",
            "scoped-noalias-aa",
            "openmp-opt-cgscc",
            "postdomtree",
            "prune-eh",
            "lcssa",
            "lazy-block-freq"
        ]
    },
    "cbench-consumer-jpeg-d": {
        "common": [
            "loop-rotate",
            "gvn",
            "early-cse-memssa",
            "instcombine",
            "sroa",
            "licm",
            "mem2reg"
        ],
        "top": [
            "loop-rotate",
            "gvn",
            "early-cse-memssa",
            "instcombine",
            "sroa",
            "licm",
            "mem2reg",
            "loop-vectorize",
            "loop-unroll",
            "indvars",
            "dse",
            "function-attrs",
            "transform-warning",
            "slp-vectorizer",
            "alignment-from-assumptions",
            "called-value-propagation",
            "callsite-splitting",
            "loops",
            "float2int",
            "elim-avail-extern"
        ]
    },
    "cbench-telecom-adpcm-c": {
        "common": [],
        "top": [
            "globalopt",
            "gvn",
            "memcpyopt",
            "mem2reg",
            "strip-dead-prototypes",
            "simplifycfg",
            "licm",
            "lazy-block-freq",
            "loop-instsimplify",
            "sroa",
            "elim-avail-extern",
            "instcombine",
            "libcalls-shrinkwrap",
            "reassociate",
            "globaldce",
            "loop-rotate",
            "loop-vectorize",
            "ipsccp",
            "globals-aa",
            "function-attrs"
        ]
    },
    "cbench-telecom-adpcm-d": {
        "common": [
            "instcombine",
            "callsite-splitting"
        ],
        "top": [
            "instcombine",
            "callsite-splitting",
            "globalopt",
            "mem2reg",
            "gvn",
            "simplifycfg",
            "licm",
            "sroa",
            "loop-unroll",
            "loop-rotate",
            "loop-distribute",
            "indvars",
            "early-cse-memssa",
            "ipsccp",
            "phi-values",
            "scoped-noalias-aa",
            "alignment-from-assumptions",
            "jump-threading",
            "rpo-function-attrs",
            "loop-simplifycfg"
        ]
    },
    "cbench-office-stringsearch2": {
        "common": [
            "instcombine",
            "libcalls-shrinkwrap"
        ],
        "top": [
            "instcombine",
            "libcalls-shrinkwrap",
            "reassociate",
            "licm",
            "globalopt",
            "ipsccp",
            "function-attrs",
            "inferattrs",
            "early-cse",
            "gvn",
            "phi-values",
            "simplifycfg",
            "early-cse-memssa",
            "loop-rotate",
            "mem2reg",
            "sroa",
            "callsite-splitting",
            "rpo-function-attrs",
            "inject-tli-mappings",
            "loop-load-elim"
        ]
    },
    "cbench-security-rijndael": {
        "common": [
            "loop-rotate",
            "globalopt",
            "gvn",
            "instcombine",
            "branch-prob",
            "slp-vectorizer",
            "globaldce",
            "aggressive-instcombine",
            "simplifycfg",
            "loop-unroll",
            "called-value-propagation",
            "deadargelim",
            "sroa",
            "vector-combine",
            "memoryssa",
            "loop-vectorize"
        ],
        "top": [
            "loop-rotate",
            "globalopt",
            "gvn",
            "instcombine",
            "branch-prob",
            "slp-vectorizer",
            "globaldce",
            "aggressive-instcombine",
            "simplifycfg",
            "loop-unroll",
            "called-value-propagation",
            "deadargelim",
            "sroa",
            "vector-combine",
            "memoryssa",
            "loop-vectorize",
            "loop-simplifycfg",
            "function-attrs",
            "loop-distribute",
            "licm"
        ]
    },
    "cbench-security-sha": {
        "common": [
            "div-rem-pairs",
            "correlated-propagation"
        ],
        "top": [
            "div-rem-pairs",
            "correlated-propagation",
            "instcombine",
            "globalopt",
            "gvn",
            "ipsccp",
            "sroa",
            "licm",
            "mem2reg",
            "loop-rotate",
            "early-cse-memssa",
            "function-attrs",
            "strip-dead-prototypes",
            "block-freq",
            "indvars",
            "loop-unroll",
            "lcssa",
            "loop-simplifycfg",
            "loop-vectorize",
            "branch-prob"
        ]
    }
}

================================================
FILE: demo/comparison/plot.py
================================================
import json
import sys
from pathlib import Path

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.ticker import MultipleLocator

# Make the repository root importable when running this script directly.
current_path = Path(__file__).resolve().parent
package_path = current_path.parent.parent
sys.path.insert(0, str(package_path))

# Output directory for the generated figures.
pngs_path = package_path / "demo/comparison/pngs"

def create_plots(data, file_name, format="pdf"):
    """Render one orthographic 3-D scatter panel per algorithm and save the figure.

    Args:
        data: list of four (n, 3) arrays of objective values, one per
            algorithm. NOTE: ``data[0]`` and ``data[2]`` are swapped in
            place below, so the caller's list is mutated.
        file_name: output file name without extension, written under ``pngs_path``.
        format: matplotlib output format / file extension (default "pdf").
    """
    # Global rcParams change — affects any figure created later in this process.
    mpl.rcParams["font.family"] = ["serif"]
    mpl.rcParams["font.serif"] = ["Times New Roman"]

    # Plot settings
    fig = plt.figure(figsize=(20, 8))

    # Titles for subplots
    titles = ["ParEGO", "SMS-EGO", "MOEA/D-EGO", "Ours"]

    # NOTE(review): with the caller's order [ParEGO, SMSEGO, MoeadEGO, CauMO],
    # this in-place swap places MoeadEGO data under the "ParEGO" title and
    # vice versa — confirm the reordering is intentional.
    data[0], data[2] = data[2], data[0]
    
    # Shared axis limits so all four panels are directly comparable.
    global_min = np.min([np.min(d, axis=0) for d in data], axis=0)
    global_max = np.max([np.max(d, axis=0) for d in data], axis=0)
    
    for i, d in enumerate(data):
        ax = fig.add_subplot(1, 4, i + 1, projection='3d', proj_type='ortho')
        # Hollow circles keep dense clusters readable.
        ax.scatter(d[:, 0], d[:, 1], d[:, 2], facecolors='none', edgecolors='#304F9E', s=50, linewidths=1)

        # Algorithm label in the upper-right corner of each panel.
        ax.text2D(0.85, 0.85, titles[i], transform=ax.transAxes, fontsize=14,
            verticalalignment='center', horizontalalignment='center', 
            bbox=dict(facecolor='white', alpha=0.5, boxstyle="round,pad=0.3"))
            
        ax.view_init(elev=20, azim=-45)
        # Set the background of each axis to be transparent
        ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
        ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
        ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
        
        ax.set_xlim(global_min[0], global_max[0])
        ax.set_ylim(global_min[1], global_max[1])
        ax.set_zlim(global_min[2], global_max[2])
        
        ax.tick_params(labelsize=14)
    
    # Save the plot as a file
    
    # plt.savefig(Path(pngs_path) / f"{file_name}.png", format="png", bbox_inches="tight")
    plt.savefig(Path(pngs_path) / f"{file_name}.{format}", format=format, bbox_inches="tight")
    plt.close(fig)
    

def load_data(workload, algorithm, seed):
    """Locate the result file for (workload, algorithm, seed) and load it."""
    # The module-level `target` selects between the LLVM and GCC result trees.
    if target == "llvm":
        base, prefix = llvm_results, "llvm"
    else:
        base, prefix = gcc_results, "gcc"
    result_file = base / f"{prefix}_{workload}" / algorithm / f"{seed}_KB.json"
    return load_and_prepare_data(result_file)

def load_and_prepare_data(file_path):
    """Load a result JSON file into a de-duplicated DataFrame of inputs + objectives."""
    with open(file_path, "r") as f:
        raw = json.load(f)
    # Some result files nest everything under a single "1" key.
    if "1" in raw:
        raw = raw["1"]

    inputs = pd.DataFrame(raw["input_vector"])
    outputs = pd.DataFrame(raw["output_value"])[objectives]
    combined = pd.concat([inputs, outputs], axis=1)

    # Keep only the first occurrence of each distinct input configuration.
    combined = combined.drop_duplicates(subset=inputs.columns.tolist())

    # Drop rows whose objective equals 1e10 — presumably a failed-evaluation
    # sentinel; confirm against the result writer.
    for objective in objectives:
        combined = combined[combined[objective] != 1e10]

    return combined

def get_data_ranges(data):
    """Return per-column 'min'/'max' taken across a list of 2-D arrays."""
    per_block_mins = [np.min(block, axis=0) for block in data]
    per_block_maxs = [np.max(block, axis=0) for block in data]
    return {
        'min': np.min(per_block_mins, axis=0),
        'max': np.max(per_block_maxs, axis=0),
    }
    
def rescale_data(data, original_range, target_range):
    """Linearly map `data` from `original_range` onto `target_range`."""
    lo, hi = original_range
    new_lo, new_hi = target_range
    # Normalize to [0, 1], then stretch/shift into the target interval.
    unit = (data - lo) / (hi - lo)
    return unit * (new_hi - new_lo) + new_lo

def map_data_to_mysql_ranges(data, gcc_llvm_range, mysql_range):
    """Rescale an (n, 3) array whose columns are [throughput, latency,
    cpu_usage] from their GCC/LLVM ranges into the MySQL display ranges."""
    mapped = np.copy(data)
    for col, key in enumerate(['throughput', 'latency', 'cpu_usage']):
        source = (np.min(gcc_llvm_range[key]), np.max(gcc_llvm_range[key]))
        mapped[:, col] = rescale_data(data[:, col], source, mysql_range[key])
    return mapped

def invert_mapping(value, min_val, max_val):
    """Reflect `value` within [min_val, max_val]: low values become high and vice versa."""
    offset = value - min_val
    return max_val - offset


# Workloads on which the comparison is plotted. NOTE: this list is
# reassigned (narrowed to one workload) further down before the driver
# loop runs.
workloads_improved = [
    "cbench-telecom-gsm",
    "cbench-automotive-qsort1",
    "cbench-automotive-susan-e",
    "cbench-consumer-tiff2rgba",
    "cbench-network-patricia",
    "cbench-consumer-tiff2bw",
    "cbench-consumer-jpeg-d",
    "cbench-telecom-adpcm-c",
    "cbench-security-rijndael",
    "cbench-security-sha",
]
              
# Result directories. NOTE(review): "comparsion" is presumably the on-disk
# folder spelling — verify before "fixing" the typo.
results_path = package_path / "experiment_results"
gcc_results = results_path / "gcc_comparsion"
llvm_results = results_path / "llvm_comparsion"


# Algorithms compared (their order interacts with the panel reordering in
# create_plots) and the objective columns read from each result file.
algorithm_list = ["ParEGO", "SMSEGO", "MoeadEGO", "CauMO"]
objectives = ["execution_time", "file_size", "compilation_time"]
mysql_objs = ["throughput", "latency", "cpu_usage"]
seed_list = [65535, 65536, 65537, 65538, 65539]

# Per-MySQL-workload axis ranges used to rescale compiler data for display.
mysql_ranges = {
    'voter': {'throughput_range': (0, 8000), 'latency_range': (0, 130000), 'cpu_usage_range': (0, 0.2)},
    'sibench': {'throughput_range': (0, 17500), 'latency_range': (0, 300000), 'cpu_usage_range': (0, 0.4)},
    'smallbank': {'throughput_range': (0, 10000), 'latency_range': (0, 500000), 'cpu_usage_range': (0, 0.6)},
    'tatp': {'throughput_range': (0, 21000), 'latency_range': (0, 50000), 'cpu_usage_range': (0, 1.0)},
    'twitter': {'throughput_range': (0, 13000), 'latency_range': (0, 60000), 'cpu_usage_range': (0, 1.2)},
    'tpcc': {'throughput_range': (0, 1450), 'latency_range': (0, 500000), 'cpu_usage_range': (0, 2.0)}
}

out_format = "pdf"
target = "llvm"  # "llvm" or "gcc": selects which result tree load_data reads
# Debug narrowing: the overrides below plot only one workload/seed. Remove
# them to regenerate every figure from the full lists above.
workloads_improved = ["cbench-consumer-tiff2bw"] 
seed_list = [65539]
# out_format = "png"

# Main driver: for each (seed, workload), load every algorithm's results,
# invert and rescale them into MySQL-style display ranges, then render the
# four-panel comparison figure.
for seed in seed_list:
    try:
        for workload in workloads_improved:
            data_for_plotting = []
            for algorithm in algorithm_list:
                df = load_data(workload, algorithm, seed)
                # NOTE(review): the normalized frame is computed and then
                # immediately overwritten; the raw `df` is what gets plotted.
                df_normalized = (df - df.min()) / (df.max() - df.min())
                df_normalized = df
                data_for_plotting.append(df[objectives].to_numpy())
            
            # Short name of the workload: strip the 7-char "cbench-" prefix.
            workload = workload[7:]
            gcc_llvm_ranges = get_data_ranges(data_for_plotting)
            gcc_llvm_min, gcc_llvm_max = gcc_llvm_ranges['min'], gcc_llvm_ranges['max']
            
            for i in range(len(data_for_plotting)):
                # Column 0 stands in for throughput. Lower GCC/LLVM values
                # mean better performance, but for MySQL higher throughput is
                # better, so invert that column's mapping.
                data_for_plotting[i][:, 0] = np.array([
                    invert_mapping(x, gcc_llvm_ranges['min'][0], gcc_llvm_ranges['max'][0])
                    for x in data_for_plotting[i][:, 0]
                ])
            
            # Rescale each objective column into the 'tatp' MySQL axis ranges.
            for i in range(len(data_for_plotting)):
                for j, obj in enumerate(mysql_objs):
                    original_min = gcc_llvm_min[j]
                    original_max = gcc_llvm_max[j]
                    target_min = mysql_ranges['tatp'][f'{obj}_range'][0]
                    target_max = mysql_ranges['tatp'][f'{obj}_range'][1]

                    data_for_plotting[i][:, j] = rescale_data(
                        data_for_plotting[i][:, j],
                        (original_min, original_max),
                        (target_min, target_max)
                    )
                    
            create_plots(data_for_plotting, f"{target}_{workload}_{seed}", out_format)
    except Exception as e:
        # Best-effort: an error aborts the remaining workloads for this seed
        # but the script moves on to the next seed.
        print(f"Error: {e}")
        continue
    
# # Usage example
# np.random.seed(0)  # For reproducibility
# # data = [np.random.rand(500, 3) * 1000 for _ in range(4)]
# create_plots(df[objectives].to_numpy(), "optimization_evaluation")


# # Create synthetic data for different algorithms for each workload
# num_points = 500
# workloads = ["voter", "sibench", "smallbank", "tatp", "twitter", "tpcc"]

# def skewed_beta(a, b, min_value, max_value, n_points, skew_factor=5):
#     """
#     Generate beta distributed data points with a skew towards one of the extremes.
#     skew_factor > 1 will skew towards the max_value, otherwise towards min_value.
#     """
#     data = np.random.beta(a, b, n_points)
#     if skew_factor > 1:
#         return data**skew_factor * (max_value - min_value) + min_value
#     else:
#         return (1 - data**skew_factor) * (max_value - min_value) + min_value

# def generate_data_points(n_points, workload_ranges):
#     """
#     Generate synthetic data for different algorithms for each workload with a tendency to cluster around (0,0,x)
#     For 'our' method, the distribution is more varied to cover more PF.
#     """
#     all_data = []
#     for name, ranges in workload_ranges.items():
#         data_for_workloads = []
#         for i in range(4):  # Four algorithms including 'our' method
#             # Heavily skew throughput and latency towards lower values
#             throughput_data = skewed_beta(2, 2, ranges['throughput_range'][0], ranges['throughput_range'][1], n_points, skew_factor=0.3)
#             latency_data = skewed_beta(2, 2, ranges['latency_range'][0], ranges['latency_range'][1], n_points, skew_factor=0.3)
#             # Use a normal distribution for cpu usage but clip to range
#             cpu_usage_data = np.random.normal(loc=ranges['cpu_usage_range'][1]/2, scale=ranges['cpu_usage_range'][1]/6, size=n_points)
#             cpu_usage_data = np.clip(cpu_usage_data, ranges['cpu_usage_range'][0], ranges['cpu_usage_range'][1])

#             if i == 3:  # 'our' method should cover more PF
#                 # Add more variability to 'our' method
#                 throughput_data = np.random.uniform(ranges['throughput_range'][0], ranges['throughput_range'][1], n_points)
#                 latency_data = np.random.uniform(ranges['latency_range'][0], ranges['latency_range'][1], n_points)

#             data_for_workloads.append(np.column_stack((throughput_data, latency_data, cpu_usage_data)))
#         all_data.append(data_for_workloads)
#     return all_data


# n_points = 500
# # workloads_data = {
# #     'voter': generate_data_points(n_points, 0, 8000, 0, 130000, 0, 0.2),
# #     'sibench': generate_data_points(n_points, 0, 17500, 0, 300000, 0, 0.4),
# #     'smallbank': generate_data_points(n_points, 0, 10000, 0, 500000, 0, 0.6),
# #     'tatp': generate_data_points(n_points, 0, 21000, 0, 50000, 0, 1.0),
# #     'twitter': generate_data_points(n_points, 0, 13000, 0, 60000, 0, 1.2),
# #     'tpcc': generate_data_points(n_points, 0, 1450, 0, 500000, 0, 2.0)
# # }

# Same values as `mysql_ranges` above; referenced only by the commented-out
# synthetic-data example below — unused by the active code.
workload_ranges = {
    'voter': {'throughput_range': (0, 8000), 'latency_range': (0, 130000), 'cpu_usage_range': (0, 0.2)},
    'sibench': {'throughput_range': (0, 17500), 'latency_range': (0, 300000), 'cpu_usage_range': (0, 0.4)},
    'smallbank': {'throughput_range': (0, 10000), 'latency_range': (0, 500000), 'cpu_usage_range': (0, 0.6)},
    'tatp': {'throughput_range': (0, 21000), 'latency_range': (0, 50000), 'cpu_usage_range': (0, 1.0)},
    'twitter': {'throughput_range': (0, 13000), 'latency_range': (0, 60000), 'cpu_usage_range': (0, 1.2)},
    'tpcc': {'throughput_range': (0, 1450), 'latency_range': (0, 500000), 'cpu_usage_range': (0, 2.0)}
}

# all_data = generate_data_points(500, workload_ranges)

# # all_data = []
# # for _ in range(4):
# #     all_data.append(generate_data_points(n_points, 0, 8000, 0, 130000, 0, 0.2))

# for i, workload in enumerate(workloads):
#     create_plots(all_data[i], f"mysql_{workload}")

================================================
FILE: demo/comparison/plot_samples_dbms.py
================================================
import sys
from pathlib import Path

current_path = Path(__file__).resolve().parent
package_path = current_path.parent.parent
sys.path.insert(0, str(package_path))

import json
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from matplotlib.animation import FuncAnimation
from mpl_toolkits.mplot3d import Axes3D

from transopt.utils.pareto import calc_hypervolume, find_pareto_front
from transopt.utils.plot import plot3D

# Location of the sampled DBMS tuning results.
results_path = package_path / "experiment_results"
dbms_samples_path = results_path / "dbms_samples"

# Objective columns extracted from each result file.
objectives = ["throughput", "latency"]


def load_and_prepare_data(file_path):
    """Read a result JSON and return a cleaned DataFrame of inputs + objectives.

    Rows are de-duplicated on the input columns. Latency must be strictly
    positive; every other objective must not equal 1e10 (presumably a
    failed-evaluation sentinel — confirm against the result writer).
    """
    with open(file_path, "r") as f:
        raw = json.load(f)
    # Some result files wrap everything in a single "1" key.
    if "1" in raw:
        raw = raw["1"]

    inputs = pd.DataFrame(raw["input_vector"])
    outputs = pd.DataFrame(raw["output_value"])[objectives]
    combined = pd.concat([inputs, outputs], axis=1)

    # Keep only the first occurrence of each distinct input configuration.
    combined = combined.drop_duplicates(subset=inputs.columns.tolist())

    for objective in objectives:
        if objective == "latency":
            # Non-positive latencies are treated as invalid measurements.
            combined = combined[combined[objective] > 0]
        else:
            combined = combined[combined[objective] != 1e10]

    return combined

def load_data(workload):
    """Load the sampled DBMS results for `workload` as a DataFrame."""
    return load_and_prepare_data(dbms_samples_path / f"DBMS_{workload}.json")


def plot_pareto_front(workload):
    """Plot the Pareto front (max throughput, min latency) for `workload`
    on min-max-normalized data and save it as a PNG."""
    df = load_data(workload)
    normalized = (df - df.min()) / (df.max() - df.min())
    _, pareto_indices = find_pareto_front(
        normalized[objectives].values, return_index=True, obj_type=['max', 'min']
    )
    front = normalized.iloc[pareto_indices][objectives]

    plt.figure()
    plt.title(f"Pareto Front for {workload}")
    plt.xlabel(objectives[0])
    plt.ylabel(objectives[1])
    plt.scatter(front[objectives[0]], front[objectives[1]], c='b', marker='o')

    out_file = package_path / "demo" / "comparison" / "pngs" / f"dbms_pf_{workload}.png"
    plt.savefig(out_file)
    plt.close()  # Release the figure's memory.
    
    
def plot_all(workload):
    """Scatter-plot every (normalized) sample for `workload` and save it as a PNG."""
    df = load_data(workload)
    normalized = (df - df.min()) / (df.max() - df.min())

    plt.figure()
    plt.title(f"All samples for {workload}")
    plt.xlabel(objectives[0])
    plt.ylabel(objectives[1])
    plt.scatter(normalized[objectives[0]], normalized[objectives[1]], c='b', marker='o')

    out_file = package_path / "demo" / "comparison" / "pngs" / f"dbms_all_{workload}.png"
    plt.savefig(out_file)
    plt.close()  # Release the figure's memory.
    
if __name__ == "__main__":
    # Generate both the Pareto-front and all-samples figures per workload.
    workloads_dbms = (
        "sibench",
        "smallbank",
        "tatp",
        "tpcc",
        "twitter",
        "voter",
    )

    for workload in workloads_dbms:
        plot_pareto_front(workload)
        plot_all(workload)

================================================
FILE: demo/comparison/start_server.py
================================================
import os
import sys
from pathlib import Path

# Define the current and package paths
current_path = Path(__file__).resolve().parent
package_path = current_path.parent.parent
sys.path.insert(0, str(package_path))

# Define the HTML directory
html_dir = package_path / "demo" / "comparison" / "htmls"

# Function to generate index.html
def generate_index_html(directory=None):
    """Write an index.html linking to every other .html file in a directory.

    Args:
        directory: Directory to index. Defaults to the module-level
            ``html_dir``; accepting a path keeps the function reusable.

    Files are listed in sorted order (``Path.glob`` order is filesystem-
    dependent) and names are HTML-escaped so unusual file names cannot
    break the generated markup.
    """
    from html import escape  # stdlib; local import keeps module load light

    target_dir = Path(html_dir if directory is None else directory)
    with open(target_dir / 'index.html', 'w') as index_file:
        index_file.write('<html><body>\n')
        index_file.write('<h1>List of HTML files</h1>\n')
        index_file.write('<ul>\n')

        # Sorted for a deterministic page across runs and platforms.
        for html_file in sorted(target_dir.glob('*.html')):
            link = html_file.name
            # Exclude index.html itself from the list
            if link != 'index.html':
                safe = escape(link, quote=True)
                index_file.write(f'<li><a href="{safe}">{safe}</a></li>\n')

        index_file.write('</ul>\n')
        index_file.write('</body></html>')

# Function to start a simple HTTP server
def start_http_server():
    """Serve `html_dir` over HTTP (http.server's default port) until interrupted.

    Uses the interpreter running this script (`sys.executable`) instead of
    whatever `python` happens to be on PATH, and passes an argument list
    rather than a shell string.
    """
    import subprocess

    os.chdir(html_dir)  # http.server serves the current working directory
    subprocess.run([sys.executable, "-m", "http.server"])

if __name__ == "__main__":
    generate_index_html()  # Build/refresh the index.html listing
    start_http_server()  # Serve the directory; blocks until interrupted

================================================
FILE: demo/correlation_analysis.py
================================================
import logging
import os
import argparse

from pathlib import Path
from csstuning.compiler.compiler_benchmark import CompilerBenchmarkBase
from transopt.ResultAnalysis.CorrelationAnalysis import MutualInformation
from transopt.ResultAnalysis.CorrelationAnalysis import correlation_analysis
def run_analysis(Exper_folder: Path, tasks, methods, seeds, args):
    """Run correlation analysis over every (task, method, seed) combination.

    Thin wrapper around ``correlation_analysis``; outputs are presumably
    written beneath ``Exper_folder`` by the callee — confirm there.

    Args:
        Exper_folder: Root directory of the experiment results.
        tasks: Mapping of task name -> settings (budget, workloads).
        methods: Iterable of method names to analyse.
        seeds: Random seeds whose runs should be included.
        args: Parsed command-line namespace forwarded to the analysis.
    """
    # The original created an unused module logger here; removed.
    correlation_analysis(Exper_folder, tasks=tasks, methods=methods, seeds=seeds, args=args)



def str2bool(value):
    """Convert a command-line string to a bool.

    `argparse` with `type=bool` is a classic bug: `bool("False")` is True,
    so ANY non-empty value parsed as True. This converter accepts the usual
    spellings explicitly and rejects everything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ("true", "1", "yes", "y"):
        return True
    if lowered in ("false", "0", "no", "n"):
        return False
    raise argparse.ArgumentTypeError(f"Expected a boolean, got {value!r}")


if __name__ == '__main__':
    samples_num = 5000
    tasks = {
        "GCC": {"budget": samples_num, "workloads": None},
        "LLVM": {"budget": samples_num, "workloads": None},
    }
    Methods_list = {'ParEGO'}
    Seeds = [0]

    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument("-in", "--init_number", type=int, default=0)
    parser.add_argument("-p", "--exp_path", type=str, default='../LFL_experiments')
    # Experiment name; results live under <exp_path>/<exp_name>.
    parser.add_argument("-n", "--exp_name", type=str, default='test')
    # NOTE: these flags previously used `type=bool`, which treated any
    # non-empty string (including "False") as True.
    parser.add_argument("-c", "--comparision", type=str2bool, default=True)
    parser.add_argument("-a", "--track", type=str2bool, default=True)
    parser.add_argument("-r", "--report", type=str2bool, default=False)
    # Whether to resume from previously saved data instead of starting fresh.
    parser.add_argument("-lm", "--load_mode", type=str2bool, default=True)

    args = parser.parse_args()
    Exper_folder = Path(args.exp_path) / args.exp_name
    run_analysis(Exper_folder, tasks=tasks, methods=Methods_list, seeds=Seeds, args=args)



================================================
FILE: demo/experiment_lsh_validity.py
================================================
import random
import string
import time
import uuid
import pandas as pd

from transopt.datamanager.manager import DataManager
from transopt.datamanager.database import Database
from transopt.utils.path import get_library_path

# Domain -> pool of base variable names; generate_dataset_config() draws
# from these to synthesize realistic-looking dataset configurations.
base_strings = {
    "finance": [
        "interest_rate",
        "loan_amount",
        "credit_score",
        "investment_return",
        "market_risk",
    ],
    "health": [
        "blood_pressure",
        "heart_rate",
        "cholesterol_level",
        "blood_sugar",
        "body_mass_index",
    ],
    "transportation": [
        "traffic_flow",
        "fuel_usage",
        "travel_time",
        "vehicle_capacity",
        "route_efficiency",
    ],
    "energy": [
        "power_consumption",
        "emission_level",
        "renewable_source",
        "energy_cost",
        "grid_stability",
    ],
    "education": [
        "student_performance",
        "teacher_ratio",
        "course_availability",
        "graduation_rate",
        "facility_utilization",
    ],
}


def generate_random_string(length):
    """Return a random string of `length` lowercase ASCII letters.

    Uses the module-level `random` state (not cryptographically secure),
    which is fine for synthesizing test identifiers.
    """
    return "".join(random.choices(string.ascii_lowercase, k=length))


def generate_dataset_config():
    """Synthesize a random (dataset_name, dataset_config) pair.

    The configuration mimics a real dataset: 3-5 continuous variables named
    after domain-specific base strings with random suffixes, 1-2 minimization
    objectives, no fidelities, and an `additional_config` metadata block.
    """
    domain = random.choice(list(base_strings.keys()))
    num_variables = random.randint(3, 5)
    num_objectives = random.randint(1, 2)

    workload = random.randint(1, 5)
    problem_name = f"{domain}{generate_random_string(3)}"
    dataset_name = f"{problem_name}_{workload}_{uuid.uuid4().hex[:8]}"

    # All variables are treated as continuous for simplicity.
    chosen_bases = random.sample(base_strings[domain], k=num_variables)
    variables = [
        {
            "name": f"{base}{generate_random_string(random.randint(1, 3))}",
            "type": "continuous",
        }
        for base in chosen_bases
    ]

    objectives = [
        {"name": f"obj_{idx}_{generate_random_string(3)}", "type": "minimize"}
        for idx in range(num_objectives)
    ]
    fidelities = []  # No fidelities defined in this setup; extend if needed.

    # Extra metadata describing the synthetic problem.
    additional_config = {
        "problem_name": problem_name,
        "dim": num_variables,
        "obj": num_objectives,
        "fidelity": generate_random_string(random.randint(3, 6)),
        "workloads": workload,
        "budget_type": random.choice(["Num_FEs", "Hours", "Minutes", "Seconds"]),
        "budget": random.randint(1, 100),
    }

    return dataset_name, {
        "variables": variables,
        "objectives": objectives,
        "fidelities": fidelities,
        "additional_config": additional_config,
    }


def create_experiment_datasets(dm, num_datasets):
    """Populate the data manager with ``num_datasets`` random datasets."""
    remaining = num_datasets
    while remaining > 0:
        name, cfg = generate_dataset_config()
        dm.create_dataset(name, cfg)
        remaining -= 1


def get_shingles(text, ngram=5):
    """Return the set of character n-grams (shingles) of *text*.

    Texts shorter than ``ngram`` yield the empty set.
    """
    return {text[start : start + ngram] for start in range(len(text) - ngram + 1)}


def cal_jacard_similarity(cfg1, cfg2):
    """Jaccard similarity of the shingle sets of two dataset descriptors.

    Args:
        cfg1, cfg2: (task_name, variable_names) string pairs; the shingles
            of both strings are unioned before comparison.

    Returns:
        |intersection| / |union| of the two shingle sets, in [0, 1].
        If both descriptors are too short to produce any shingles, the
        union is empty; they are then treated as identical (1.0) instead
        of raising ZeroDivisionError as the previous version did.
    """
    task_name1, variable_names1 = cfg1
    task_name2, variable_names2 = cfg2

    shingles1 = get_shingles(task_name1).union(get_shingles(variable_names1))
    shingles2 = get_shingles(task_name2).union(get_shingles(variable_names2))

    union = shingles1.union(shingles2)
    if not union:
        # Both inputs shorter than the shingle size — nothing to compare.
        return 1.0
    return len(shingles1.intersection(shingles2)) / len(union)


def validity_experiment(n_tables, num_replicates=3, jacard_lower_bound = 0.35):
    """Benchmark brute-force Jaccard search against LSH-based retrieval.

    Creates a fresh database populated with ``n_tables`` randomly generated
    datasets, then for ``num_replicates`` random query configurations:
    (a) scans every stored dataset computing the pairwise Jaccard similarity
    of character shingles, and (b) queries the DataManager's LSH index and
    re-checks the returned candidates. In both cases only matches with
    similarity >= ``jacard_lower_bound`` are kept.

    Returns:
        (exec_time_jacard, exec_time_lsh): per-replicate wall-clock times
        in seconds for the brute-force and LSH searches respectively.
    """
    # Clean up the database
    db_path = get_library_path() / "exp_database.db"
    if db_path.exists():
        db_path.unlink()

    db = Database(db_path)
    dm = DataManager(db, num_hashes=100, char_ngram=5, num_bands=50)
    setup_start = time.time()
    create_experiment_datasets(dm, n_tables)
    setup_end = time.time()
    print(f"Generated {n_tables} datasets in {setup_end - setup_start} seconds")

    exec_time_jacard = []
    exec_time_lsh = []
    for _ in range(num_replicates):
        target_dataset_name, target_dataset_cfg = generate_dataset_config()
        print(
            f"Searching for similar datasets to {target_dataset_name}"
        )
        print("=====================================")

        task_name, var_names, num_var, num_obj = dm._construct_vector(
            target_dataset_cfg
        )

        # --- Brute force: compare the query against every stored dataset ---
        start_jacard = time.time()
        similar_datasets_by_jacard = set()
        all_datasets = dm.get_all_datasets()
        for dataset in all_datasets:
            dataset_info = dm.get_dataset_info(dataset)
            task_name_tmp, var_names_tmp, num_var_tmp, num_obj_tmp = (
                dm._construct_vector(dataset_info)
            )
            # Only datasets with matching dimensionality are comparable.
            if num_var != num_var_tmp or num_obj != num_obj_tmp:
                continue

            similarity = cal_jacard_similarity(
                (task_name, var_names), (task_name_tmp, var_names_tmp)
            )

            if similarity >= jacard_lower_bound:
                similar_datasets_by_jacard.add(dataset)

        end_jacard = time.time()
        exec_time_jacard.append(end_jacard - start_jacard)
        print(
            f"Found {len(similar_datasets_by_jacard)} similar datasets by jacard in {end_jacard - start_jacard} seconds"
        )

        # --- LSH: let the index propose candidates, then verify them ---
        start_lsh = time.time()
        similar_datasets = dm.search_similar_datasets(target_dataset_cfg)
        similar_datasets_by_lsh = set()
        for dataset in similar_datasets:
            dataset_info = dm.get_dataset_info(dataset)
            task_name_tmp, var_names_tmp, num_var_tmp, num_obj_tmp = (
                dm._construct_vector(dataset_info)
            )
            similarity = cal_jacard_similarity(
                (task_name, var_names), (task_name_tmp, var_names_tmp)
            )

            if similarity >= jacard_lower_bound:
                similar_datasets_by_lsh.add(dataset)

        end_lsh = time.time()
        exec_time_lsh.append(end_lsh - start_lsh)
        print(
            f"Found {len(similar_datasets_by_lsh)} similar datasets by lsh in {end_lsh - start_lsh} seconds"
        )
        print()

    dm.teardown()
    return exec_time_jacard, exec_time_lsh


if __name__ == "__main__":
    num_replicates = 20
    n_tables_list = [1000,2000, 3000,4000,5000,6000,7000,8000,10000]
    results_jacard = {}
    results_lsh = {}
    # results = []
    for n_tables in n_tables_list:
        exec_time_jacard, exec_time_lsh = validity_experiment(n_tables, num_replicates)
        # results.append(
        #     {
        #         "n_tables": n_tables,
        #         "exec_time_jacard": exec_time_jacard,
        #         "exec_time_lsh": exec_time_lsh,
        #     }
        # )
        print(f"n_tables: {n_tables} exec_time_jacard: {exec_time_jacard} exec_time_lsh {exec_time_lsh}")

        results_jacard[n_tables] = exec_time_jacard
        results_lsh[n_tables] = exec_time_lsh
        
        jacard_df = pd.DataFrame(results_jacard)
        lsh_df = pd.DataFrame(results_lsh)
        # 保存为CSV文件
        jacard_df.to_csv('jacard_exec_times.csv', index=False)
        lsh_df.to_csv('lsh_exec_times.csv', index=False)
        


================================================
FILE: demo/experiments.py
================================================
import logging
import os
import argparse
import sys

# Make the repository root importable when running this script directly.
current_dir = os.path.dirname(os.path.abspath(__file__))
package_dir = os.path.dirname(current_dir)
sys.path.insert(0, package_dir)

from transopt.Benchmark import construct_test_suits
from optimizer.construct_optimizer import get_optimizer
from transopt.KnowledgeBase.kb_builder import construct_knowledgebase
from transopt.KnowledgeBase.TaskDataHandler import OptTaskDataHandler


# Pin the numeric backends (MKL/NumExpr/OpenMP) to one thread each.
# NOTE(review): presumably to avoid nested parallelism skewing experiment
# timings — confirm before removing.
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"


def run_experiments(tasks, args):
    """Build all experiment components and run the optimization loop.

    Args:
        tasks: mapping of task name -> settings dict (budget, workloads, ...).
        args: parsed CLI namespace (seed, optimizer, paths, ...).
    """
    # The previous version created a module logger here but never used it;
    # the dead local has been removed.
    kb = construct_knowledgebase(args)
    testsuits = construct_test_suits(tasks, args.seed)
    optimizer = get_optimizer(args)
    data_handler = OptTaskDataHandler(kb, args)
    optimizer.optimize(testsuits, data_handler)


if __name__ == "__main__":
    tasks = {
        # 'DBMS':{'budget': 11, 'time_stamp': 3},
        # 'GCC' : {'budget': 11, 'time_stamp': 3},
        # 'LLVM' : {'budget': 11, 'time_stamp': 3},
        'Ackley': {'budget': 11, 'workloads': [1,2,3], 'params':{'input_dim':1}},
        # 'MPB': {'budget': 110, 'time_stamp': 3},
        # 'Griewank': {'budget': 11, 'time_stamp': 3,  'params':{'input_dim':2}},
        # "AckleySphere": {"budget": 1000, "workloads":[1,2,3], "params": {"input_dim": 2}},
        # 'Lunar': {'budget': 110, 'time_stamp': 3},
        # 'XGB': {'budget': 110, 'time_stamp': 3},
    }

    parser = argparse.ArgumentParser(description="Process some integers.")
    parser.add_argument("-im", "--init_method", type=str, default="random")
    parser.add_argument("-in", "--init_number", type=int, default=7)
    parser.add_argument(
        "-p", "--exp_path", type=str, default=f"{package_dir}/../LFL_experiments"
    )
    parser.add_argument(
        "-n", "--exp_name", type=str, default="test"
    )  # 实验名称,保存在experiments中
    parser.add_argument("-s", "--seed", type=int, default=0)  # 设置随机种子,与迭代次数相关
    parser.add_argument(
        "-m", "--optimizer", type=str, default="MTBO"
    )  # 设置method:WS,MT,INC
    parser.add_argument("-v", "--verbose", type=bool, default=True)
    parser.add_argument("-norm", "--normalize", type=str, default="norm")
    parser.add_argument("-sm", "--save_mode", type=int, default=1)  # 控制是否保存模型
    parser.add_argument("-lm", "--load_mode", type=bool, default=False)  # 控制是否从头开始
    parser.add_argument(
        "-ac", "--acquisition_func", type=str, default="LCB"
    )  # 控制BO的acquisition function
    args = parser.parse_args()

    run_experiments(tasks, args)


================================================
FILE: demo/importances/cal_relationship.py
================================================
import sys
from pathlib import Path

current_path = Path(__file__).resolve().parent
package_path = current_path.parent.parent
sys.path.insert(0, str(package_path))

import json
from pathlib import Path

import cmasher as cmr
import dcor
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import numpy as np
import pandas as pd

target = "gcc"
results_path = package_path / "experiment_results"
gcc_comparsion_path = results_path / "gcc_archive_new"
gcc_samples_path = results_path / "gcc_samples"
llvm_comparsion_path = results_path / "llvm_archive"
llvm_samples_path = results_path / "llvm_samples"

pngs_path = package_path / "demo/importances/pngs"

mpl.rcParams['font.family'] = ['serif']
mpl.rcParams['font.serif'] = ['Times New Roman']

def load_and_prepare_data(file_path, objectives):
    """Load a sampling JSON file into a de-duplicated, filtered DataFrame.

    The file must contain "input_vector" (list of configuration dicts) and
    "output_value" (list of objective dicts). Duplicate configurations and
    rows whose objective equals 1e10 (presumably a failure sentinel —
    confirm against the sampling scripts) are removed.

    Args:
        file_path: path of the JSON file to read.
        objectives: objective column names to keep from "output_value".

    Returns:
        DataFrame with the input columns followed by the objective columns.
    """
    with open(file_path, "r") as f:
        payload = json.load(f)

    inputs_df = pd.DataFrame(payload["input_vector"])
    outputs_df = pd.DataFrame(payload["output_value"])[objectives]
    combined = pd.concat([inputs_df, outputs_df], axis=1)

    # A duplicate is defined by the configuration (input columns) only.
    combined = combined.drop_duplicates(subset=list(inputs_df.columns))

    for objective in objectives:
        combined = combined[combined[objective] != 1e10]
    print(f"Loaded {len(combined)} data points after removing extreme values")
    return combined


def cal_dcor(df, objectives):
    """Distance correlation (via the dcor library) for every unordered
    pair of objective columns.

    Returns:
        Dict mapping "obj1-obj2" -> distance correlation value.
    """
    results = {}
    for idx, first in enumerate(objectives):
        for second in objectives[idx + 1 :]:
            results[f"{first}-{second}"] = dcor.distance_correlation(
                df[first], df[second]
            )
    return results


def cal_spearman_corr(df, objectives):
    """Spearman rank correlation for every unordered pair of objectives.

    Returns:
        Dict mapping "obj1-obj2" -> Spearman correlation coefficient.
    """
    matrix = df[objectives].corr(method="spearman")
    return {
        f"{first}-{second}": matrix.at[first, second]
        for idx, first in enumerate(objectives)
        for second in objectives[idx + 1 :]
    }


def cal_pearson_corr(df, objectives):
    """Pearson correlation for every unordered pair of objectives.

    Computes the full correlation matrix once and extracts the upper
    triangle as a flat dict.

    Returns:
        Dict mapping "obj1-obj2" -> Pearson correlation coefficient.
    """
    matrix = df[objectives].corr(method="pearson")
    return {
        f"{first}-{second}": matrix.at[first, second]
        for idx, first in enumerate(objectives)
        for second in objectives[idx + 1 :]
    }


def generate_grid_plot(dcor_values_dict):
    """Render a workload x objective-pair heatmap of dCor values as a PDF.

    Args:
        dcor_values_dict: mapping workload -> {objective pair -> dCor value};
            the objective pairs of the first workload define the columns.

    Saves the figure to ``pngs_path / "heatmap.pdf"``.
    """
    workloads = list(dcor_values_dict.keys())
    objective_pairs = list(dcor_values_dict[workloads[0]].keys())

    dcor_matrix = np.zeros((len(workloads), len(objective_pairs)))
    for i, workload in enumerate(workloads):
        for j, pair in enumerate(objective_pairs):
            # Missing pairs default to 0 so the grid stays rectangular.
            dcor_matrix[i, j] = dcor_values_dict[workload].get(pair, 0)

    plt.figure(figsize=(12, 10))  # Increase the height of the heatmap

    # The previous version built an unused LinearSegmentedColormap and an
    # unused short-label list; the actually-used colormap is cmr.fusion_r.
    plt.imshow(dcor_matrix, cmap=cmr.fusion_r, interpolation="nearest")
    colorbar = plt.colorbar(shrink=0.57)  # Reduce the size of the colorbar
    colorbar.ax.tick_params(labelsize=18)

    plt.yticks(range(len(workloads)), workloads, fontsize=18)
    # Rotation for better label visibility.
    plt.xticks(range(len(objective_pairs)), ['ET-CS', 'ET-CT', 'CS-CT'], rotation=45, fontsize=18)

    plt.savefig(pngs_path / "heatmap.pdf", format="pdf", bbox_inches="tight")


if __name__ == "__main__":
    gcc_workloads = [
        "cbench-consumer-tiff2rgba",
        "cbench-security-rijndael",
        "cbench-security-pgp",
        "cbench-automotive-qsort1",
        "cbench-automotive-susan-e",
        "cbench-consumer-jpeg-d",
        "cbench-security-sha",
        "cbench-telecom-adpcm-c",
        "cbench-telecom-adpcm-d",
        "cbench-telecom-gsm",
        "cbench-telecom-crc32",
        "cbench-consumer-tiff2bw",
        "cbench-consumer-mad",
        "cbench-network-patricia",
    ]

    objectives = ["execution_time", "file_size", "compilation_time"]

    # dcor_values_dict = {}
    # spearman_corr_dict = {}
    # pearson_corr_dict = {}
    # for workload in gcc_workloads:
    #     file_path = gcc_samples_path / f"GCC_{workload}.json"
    #     df = load_and_prepare_data(file_path, objectives)
    #     dcor_values = cal_dcor(df, objectives)
    #     spearman_corr = cal_spearman_corr(df, objectives)
    #     pearson_corr = cal_pearson_corr(df, objectives)
    #     print(f"dCor values for {workload}: {dcor_values}")
    #     print(f"Spearman correlation for {workload}: {spearman_corr}")

    #     dcor_values_dict[workload] = dcor_values
    #     spearman_corr_dict[workload] = spearman_corr
    #     pearson_corr_dict[workload] = pearson_corr

    # with open(pngs_path / "dcor_values_dict.json", "w") as f:
    #     json.dump(dcor_values_dict, f)

    # with open(pngs_path / "spearman_corr_dict.json", "w") as f:
    #     json.dump(spearman_corr_dict, f)

    # with open(pngs_path / "pearson_corr_dict.json", "w") as f:
    #     json.dump(pearson_corr_dict, f)

    # with open(pngs_path / "dcor_values_dict.json", "r") as f:
    #     dcor_values_dict = json.load(f)

    # with open(pngs_path / "spearman_corr_dict.json", "r") as f:
    #     spearman_corr_dict = json.load(f)

    # with open(pngs_path / "pearson_corr_dict.json", "r") as f:
    #     pearson_corr_dict = json.load(f)
    
    dcor_values_dict = {
        "telecom-adpcm-c": {
            "execution_time-file_size": 0.5096407431062894,
            "execution_time-compilation_time": 0.02156206023915185,
            "file_size-compilation_time": 0.028167304817522342,
        },
        "automotive-qsort1": {
            "execution_time-file_size": 0.24458686101114566,
            "execution_time-compilation_time": 0.4484640731112793,
            "file_size-compilation_time": 0.1319462835609861,
        },
        "network-patricia": {
            "execution_time-file_size": 0.3136478783871287,
            "execution_time-compilation_time": 0.11344940640932157,
            "file_size-compilation_time": 0.23628956882620056,
        },
        "telecom-gsm": {
            "execution_time-file_size": 0.3199972317712137,
            "execution_time-compilation_time": 0.19506712567511303,
            "file_size-compilation_time": 0.08086715789520826,
        },
        "consumer-tiff2rgba": {
            "execution_time-file_size": 0.19036475515437773,
            "execution_time-compilation_time": 0.18802272660380803,
            "file_size-compilation_time": 0.09256748900522595,
        },
    }

    generate_grid_plot(dcor_values_dict)


================================================
FILE: demo/importances/draw_obj_heatmap.py
================================================
import pandas as pd
import numpy as np
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import matplotlib as mpl
import dcor
import cmasher as cmr
import json
import sys
from pathlib import Path

current_path = Path(__file__).resolve().parent
package_path = current_path.parent.parent
# Make the repository root importable when running this script directly.
sys.path.insert(0, str(package_path))


# Output directory for the generated heatmap PDFs.
pngs_path = package_path / "demo/importances/pngs"

# Use a serif Times New Roman font for all matplotlib text.
mpl.rcParams["font.family"] = ["serif"]
mpl.rcParams["font.serif"] = ["Times New Roman"]

def generate_grid_plot_combine(dcor_values_dicts):
    """Draw three dCor heatmaps side by side and save them as one PDF.

    Args:
        dcor_values_dicts: iterable of (up to three) dicts, each mapping
            workload -> {objective pair -> dCor value}.

    Saves the figure to ``pngs_path / "combined_heatmap.pdf"``.
    """
    # One figure with three subplots (one per dataset).
    fig, axs = plt.subplots(1, 3, figsize=(25, 10), constrained_layout=True)
    
    for ax, dcor_values_dict in zip(axs, dcor_values_dicts,):
        workloads = list(dcor_values_dict.keys())
        objective_pairs = list(dcor_values_dict[workloads[0]].keys())
    
        dcor_matrix = np.zeros((len(workloads), len(objective_pairs)))
    
        for i, workload in enumerate(workloads):
            for j, pair in enumerate(objective_pairs):
                dcor_matrix[i, j] = dcor_values_dict[workload].get(pair, 0)
    
        # vmin/vmax are fixed, so all three images share the same color scale
        # and the single colorbar below is valid for every subplot.
        im = ax.imshow(dcor_matrix, cmap=cmr.prinsenvlag_r, interpolation="nearest", vmin=-0.6, vmax=0.6)
    
        ax.set_yticks(range(len(workloads)))
        ax.set_yticklabels(workloads, fontsize=36)
        ax.set_xticks(range(len(objective_pairs)))
        ax.set_xticklabels(objective_pairs, rotation=0, fontsize=36)
    
    cbar = fig.colorbar(im, ax=axs, shrink=1, location='right')
    cbar.ax.tick_params(labelsize=36)  # colorbar tick-label font size
    plt.savefig(pngs_path / "combined_heatmap.pdf", format="pdf", bbox_inches="tight")
    
    


def generate_grid_plot(dcor_values_dict, file_name):
    """Draw one workload x objective-pair dCor heatmap and save it as
    ``<file_name>_heatmap.pdf`` in the pngs directory."""
    workloads = list(dcor_values_dict)
    pairs = list(dcor_values_dict[workloads[0]])

    # Missing pairs default to 0 so the grid stays rectangular.
    matrix = np.array(
        [[dcor_values_dict[w].get(p, 0) for p in pairs] for w in workloads],
        dtype=float,
    )

    plt.figure(figsize=(12, 10))

    plt.imshow(matrix, cmap=cmr.prinsenvlag_r, interpolation="nearest", vmin=-0.6, vmax=0.6)
    cbar = plt.colorbar(shrink=1)
    cbar.ax.tick_params(labelsize=18)

    plt.yticks(range(len(workloads)), workloads, fontsize=18)
    plt.xticks(range(len(pairs)), pairs, rotation=0, fontsize=18)

    plt.savefig(pngs_path / f"{file_name}_heatmap.pdf", format="pdf", bbox_inches="tight")


if __name__ == "__main__":
    gcc_dcor_values_dict = {
        "adpcm-c": {"ET-CS": 0.5096407431062894, "ET-CT": 0.02156206023915185, "CS-CT": 0.028167304817522342},
        "qsort1": {"ET-CS": 0.24458686101114566, "ET-CT": 0.4484640731112793, "CS-CT": 0.1319462835609861},
        "patricia": {"ET-CS": 0.3136478783871287, "ET-CT": 0.11344940640932157, "CS-CT": 0.23628956882620056},
        "gsm": {"ET-CS": 0.3199972317712137, "ET-CT": 0.19506712567511303, "CS-CT": 0.08086715789520826},
        "tiff2rgba": {"ET-CS": 0.19036475515437773, "ET-CT": 0.18802272660380803, "CS-CT": 0.09256748900522595},
        "susan-e": {"ET-CS": 0.1362765512460971, "ET-CT": 0.36116979864249992, "CS-CT": 0.05943189644484737},
    }
    
    mysql_dcor_values_dict = {
        "SiBench": {"T-L": 0.4, "T-CU": 0.05, "L-CU": -0.13},
        "Voter": {"T-L": 0.2, "T-CU": 0.03, "L-CU": -0.14},
        "SmallBank": {"T-L": 0.6, "T-CU": 0.24, "L-CU": -0.35},
        "Twitter": {"T-L": 0.25, "T-CU": 0.43, "L-CU": -0.02},
        "TATP": {"T-L": 0.14, "T-CU": 0.05, "L-CU": -0.13},
        "TPC-C": {"T-L": 0.23, "T-CU": 0.16, "L-CU": -0.34},
    }

    hadoop_dcor_values_dict = {
        "WordCount": {"ET-CU": 0.5, "ET-MU": 0.14, "CU-MU": 0.03},
        "KMeans": {"ET-CU": 0.6, "ET-MU": 0.05, "CU-MU": 0.02},
        "Bayes": {"ET-CU": 0.4, "ET-MU": 0.23, "CU-MU": 0.4},
        "NWeight": {"ET-CU": 0.5, "ET-MU": 0.2, "CU-MU": 0.4},
        "PageRank": {"ET-CU": 0.13, "ET-MU": 0.35, "CU-MU": 0.16},
        "TeraSort": {"ET-CU": 0.4, "ET-MU": 0.16, "CU-MU": 0.15},
    }

    # generate_grid_plot(gcc_dcor_values_dict, "gcc")
    # generate_grid_plot(mysql_dcor_values_dict, "mysql")
    # generate_grid_plot(hadoop_dcor_values_dict, "hadoop")
    
    generate_grid_plot_combine([gcc_dcor_values_dict, mysql_dcor_values_dict, hadoop_dcor_values_dict])

================================================
FILE: demo/importances/get_feature_importances.py
================================================
import sys
from pathlib import Path

current_dir = Path(__file__).resolve().parent
package_dir = current_dir.parent.parent
sys.path.insert(0, str(package_dir))

import json
import os
import tarfile
from pathlib import Path

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor

# from csstuning.compiler.compiler_benchmark import GCCBenchmark

# Directory holding the sampling results to analyse; switch between the
# commented GCC paths and the active DBMS path as needed.
# data_path = package_dir / "experiment_results" / "gcc_samples"
# data_path = package_dir / "experiment_results" / "gcc_samples"
data_path = package_dir / "experiment_results" / "dbms_sampling"


def load_and_prepare_data(file_path, objectives):
    """Read a sampling JSON file into a de-duplicated DataFrame.

    The file must contain "input_vector" (list of configuration dicts) and
    "output_value" (list of objective dicts). Unlike the variant in
    cal_relationship.py, no extreme-value filtering is applied here.

    Args:
        file_path: path of the JSON file to read.
        objectives: objective column names to keep from "output_value".

    Returns:
        DataFrame with the input columns followed by the objective columns,
        keeping only the first occurrence of each configuration.
    """
    with open(file_path, "r") as f:
        payload = json.load(f)

    inputs = pd.DataFrame(payload["input_vector"])
    outputs = pd.DataFrame(payload["output_value"])[objectives]
    merged = pd.concat([inputs, outputs], axis=1)

    # A duplicate is defined by the configuration (input columns) only.
    return merged.drop_duplicates(subset=list(inputs.columns))


def calculate_feature_importances(df, objective):
    """Fit a CART regressor and return its per-feature importance scores.

    Args:
        df: DataFrame with feature columns plus the objective column.
        objective: name of the target column to predict.

    Returns:
        DataFrame with columns "Feature" and "Importance".
    """
    features = df.drop([objective], axis=1)
    target = df[objective]

    regressor = DecisionTreeRegressor()
    regressor.fit(features, target)

    return pd.DataFrame(
        {"Feature": features.columns, "Importance": regressor.feature_importances_}
    )


def aggregate_importances(importances_list):
    """Average importance scores per feature across repetitions.

    Args:
        importances_list: list of DataFrames with "Feature"/"Importance".

    Returns:
        One DataFrame with the mean importance per feature, sorted from most
        to least important.
    """
    stacked = pd.concat(importances_list)
    averaged = stacked.groupby("Feature").mean().reset_index()
    return averaged.sort_values(by="Importance", ascending=False)


def combine_and_rank_features(importances_list):
    """Sum importances per feature across objectives and rank descending.

    Args:
        importances_list: list of DataFrames with "Feature"/"Importance".

    Returns:
        DataFrame with one row per feature, sorted by total importance.
    """
    totals = pd.concat(importances_list).groupby("Feature").sum()
    return totals.sort_values(by="Importance", ascending=False).reset_index()


def get_top_combined_features(common_features, combined_ranked, total_features=20):
    """Pad the common features with top-ranked extras up to a target count.

    Args:
        common_features: features shared by every objective's top list.
        combined_ranked: DataFrame ranked by total importance, with a
            "Feature" column.
        total_features: desired size of the returned list.

    Returns:
        ``common_features`` followed by the highest-ranked features not
        already present, truncated at ``total_features`` entries.
    """
    final_features = list(common_features)

    for candidate in combined_ranked["Feature"]:
        if len(final_features) >= total_features:
            break  # reached the target size
        if candidate not in common_features:
            final_features.append(candidate)

    return final_features


def find_common_features(importances_list):
    """Intersect the top-20 feature sets of several importance DataFrames.

    Args:
        importances_list: DataFrames with "Feature"/"Importance" columns.

    Returns:
        List of features appearing in every DataFrame's top 20 (order is
        unspecified, since it comes from a set intersection).
    """
    top_feature_sets = []
    for importance_df in importances_list:
        # Take the 20 highest-importance features of this repetition.
        top20 = importance_df.sort_values(by="Importance", ascending=False).head(20)
        top_feature_sets.append(set(top20["Feature"]))

        print("Top 20 Features:")
        print(top20)

    return list(set.intersection(*top_feature_sets))


def train_and_evaluate_model(
    df, features, objective, use_top_features=False, random_state=42
):
    """Train a CART regressor and report its normalized RMSE on a held-out split.

    Args:
        df: DataFrame holding the feature columns plus the objective column.
        features: DataFrame with a "Feature" column; used to restrict the
            design matrix when ``use_top_features`` is True.
        objective: name of the target column.
        use_top_features: if True, use only ``features["Feature"]`` columns;
            otherwise use every column except the objective.
        random_state: seed for both the train/test split and the tree.
            Bug fix: this parameter was previously accepted but never
            forwarded, so repeated runs were not reproducible.

    Returns:
        RMSE normalized by the standard deviation of the test targets.
    """
    X = df[features["Feature"]] if use_top_features else df.drop([objective], axis=1)
    y = df[objective]

    # Forward random_state so callers (e.g. get_workloads_improved, which
    # passes 42 + i) actually get reproducible splits and trees.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=random_state
    )
    model = DecisionTreeRegressor(random_state=random_state)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)

    # Evaluate the model; normalizing by std makes scores comparable
    # across objectives with different scales.
    nrmse = np.sqrt(mean_squared_error(y_test, y_pred)) / np.std(y_test)
    feature_set = "Top 20 Features" if use_top_features else "All Features"
    print(f"{feature_set} - Normalized RMSE: {nrmse}")

    # The previous version recomputed and sorted feature importances here,
    # but only ever printed them from commented-out lines; dead code removed.
    return nrmse


def get_workloads_improved():
    """Find workloads whose execution-time model improves with extra objectives.

    For every sampling file in ``data_path``, trains CART models on the top
    features twice — once with only the ``execution_time`` column loaded and
    once with ``file_size`` and ``compilation_time`` also available — and
    compares the averaged normalized RMSE over ``iterations`` repetitions.

    Returns:
        List of workload names whose average NRMSE decreased when the extra
        objective columns were included.
    """
    iterations = 1  # repetitions per workload
    workloads_improved = []
    
    
    workloads_sampled = []
    
    for file in data_path.glob("*.json"):
        # Strip the extension and the first 4 characters of the stem.
        # NOTE(review): assumes a 4-char prefix like "GCC_"; for "DBMS_..."
        # files this leaves a leading underscore — confirm intended.
        workload = file.name.split(".")[0][4:]
        
        workloads_sampled.append(workload)
        print("==================================================")
        print(workload)
        print("==================================================")

        # Initialize lists to store the results of repeated experiments
        nrmse_excluding_list = []
        nrmse_including_list = []

        for i in range(iterations):
            random_state = 42 + i
            print(f"Running iteration {i+1}/{iterations}...")

            # Repeat the experiment for 'excluding objectives'
            print("CART with top 20 features, excluding objectives")
            df_combined = load_and_prepare_data(file, objectives=["execution_time"])
            
        
            important_features = calculate_feature_importances(
                df_combined, "execution_time"
            )
            nrmse_excluding = train_and_evaluate_model(
                df_combined,
                important_features,
                "execution_time",
                use_top_features=True,
                random_state=random_state,
            )
            nrmse_excluding_list.append(nrmse_excluding)
            print("\n")

            # Repeat the experiment for 'including objectives'
            print("CART with top 20 features, including objectives")
            df_combined = load_and_prepare_data(
                file, objectives=["execution_time", "file_size", "compilation_time"]
            )
            important_features = calculate_feature_importances(
                df_combined, "execution_time"
            )
            nrmse_including = train_and_evaluate_model(
                df_combined,
                important_features,
                "execution_time",
                use_top_features=True,
                random_state=random_state,
            )
            nrmse_including_list.append(nrmse_including)
            print("\n")


        
        # Calculate average or median NRMSE for both configurations
        avg_nrmse_excluding = np.mean(nrmse_excluding_list)
        avg_nrmse_including = np.mean(nrmse_including_list)

        # Compare and record improvements
        if avg_nrmse_including < avg_nrmse_excluding:
            workloads_improved.append(workload)
        print(f"Average Improvement: {avg_nrmse_excluding - avg_nrmse_including}")
        print("\n\n")

    print(f"Workloads improved: {workloads_improved}")

    return workloads_improved


def get_features_for_exp(workloads, repetitions=5):
    """Compute per-workload feature importances and persist them to JSON.

    For every workload the feature importances are estimated ``repetitions``
    times per DBMS objective (throughput and latency), aggregated across
    repetitions, then reduced to a set of features common to both objectives
    plus a combined ranking topped up to 20 entries.  The result is written
    to ``features_by_workload.json``.

    NOTE(review): GCC/LLVM variants (objectives execution_time /
    compilation_time / file_size, data files ``GCC_*``/``LLVM_*``) were
    previously handled here; switch the data-file prefix and the objective
    names to reinstate them.
    """
    features_by_workload = {}

    for workload in workloads:
        print("==================================================")
        print(workload)
        print("==================================================")
        data_file = data_path / f"DBMS_{workload}.json"
        features_by_workload[workload] = {}

        # Collect importances over several repetitions to smooth out the
        # randomness of the underlying model fits.
        throughput_runs = []
        latency_runs = []
        for _ in range(repetitions):
            frame = load_and_prepare_data(data_file, objectives=["throughput"])
            throughput_runs.append(
                calculate_feature_importances(frame, "throughput")
            )

            frame = load_and_prepare_data(data_file, objectives=["latency"])
            latency_runs.append(
                calculate_feature_importances(frame, "latency")
            )

        # Average the repeated measurements per objective.
        throughput_importances = aggregate_importances(throughput_runs)
        latency_importances = aggregate_importances(latency_runs)

        # Features that rank highly for every objective.
        common_features = find_common_features(
            [throughput_importances, latency_importances]
        )
        features_by_workload[workload]["common"] = common_features

        # Rank all features by total importance across objectives, then
        # supplement the common set until 20 features are selected.
        combined_ranked = combine_and_rank_features(
            [throughput_importances, latency_importances]
        )
        top_features = get_top_combined_features(common_features, combined_ranked)
        features_by_workload[workload]["top"] = top_features

    # Persist one JSON document covering every workload processed above.
    with open("features_by_workload.json", "w") as fp:
        json.dump(features_by_workload, fp, indent=4)


if __name__ == "__main__":
    # Recompute the improved-workload list from scratch (slow):
    # workloads_improved = get_workloads_improved()

    # workloads_improved = [
    #     "cbench-security-sha",
    #     "cbench-telecom-crc32",
    #     "cbench-network-patricia",
    #     "cbench-office-stringsearch2",
    #     "cbench-bzip2",
    #     "cbench-security-rijndael",
    #     "cbench-automotive-bitcount",
    #     "cbench-consumer-tiff2bw",
    #     "cbench-security-pgp",  // Error compiled with LLVM
    #     "cbench-consumer-tiff2rgba",
    #     "cbench-automotive-susan-e",
    #     "cbench-telecom-adpcm-d",
    #     "cbench-telecom-adpcm-c",
    #     "cbench-telecom-gsm",
    # ]

    # GCC
    # NOTE(review): this assignment is dead code — it is overwritten by the
    # LLVM list below before ever being read.  Keep only one active when
    # switching experiment targets.
    workloads_improved = [
        "cbench-consumer-tiff2rgba",
        "cbench-security-rijndael",
        "cbench-security-pgp",
        "cbench-automotive-qsort1",
        "cbench-automotive-susan-e",
        "cbench-consumer-jpeg-d",
        "cbench-security-sha",
        "cbench-telecom-adpcm-c",
        "cbench-telecom-adpcm-d",
        "cbench-telecom-gsm",

        "cbench-telecom-crc32",
        "cbench-consumer-tiff2bw",
        "cbench-consumer-mad",
        "cbench-network-patricia",

        # "polybench-cholesky",
        # "polybench-fdtd-apml",
        # "polybench-symm",
        # "polybench-ludcmp",
        # "polybench-lu",
        # "polybench-bicg",


        # "cbench-bzip2",
        # "cbench-office-stringsearch2",
    ]

    # NOTE(review): currently unused in this script run.
    workloads_gcc_extra = [
        "polybench-3mm",
        "cbench-automotive-susan-c",
        "cbench-consumer-tiff2dither",
        "cbench-automotive-bitcount",
        "polybench-2mm",
        "polybench-adi",
        "cbench-office-stringsearch2",
        "polybench-fdtd-2d",
        "polybench-atax",
        "polybench-doitgen",
        "polybench-durbin",
        "polybench-fdtd-apml",
        "polybench-gemver",
        "polybench-gesummv",
    ]

    # LLVM
    # NOTE(review): also unused — only workloads_dbms feeds the call below.
    workloads_improved = [
        "cbench-telecom-gsm",
        "cbench-automotive-qsort1",
        "cbench-automotive-susan-e",
        "cbench-consumer-tiff2rgba",
        "cbench-network-patricia",
        "cbench-automotive-bitcount",
        "cbench-bzip2",
        "cbench-consumer-tiff2bw",
        "cbench-consumer-jpeg-d",
        "cbench-telecom-adpcm-c",
        "cbench-telecom-adpcm-d",
        "cbench-office-stringsearch2",
        "cbench-security-rijndael",
        "cbench-security-sha",
    ]

    # The DBMS workloads are what this script actually analyses.
    workloads_dbms = [
        "sibench",
        "smallbank",
        "tatp",
        "tpcc",
        "twitter",
        "voter"
    ]
    get_features_for_exp(workloads_dbms)


================================================
FILE: demo/jacard_exec_times.csv
================================================
1000,2000,3000,4000,5000,6000,7000,8000,10000
0.9119875431060791,2.082753896713257,3.91093111038208,8.557840585708618,12.041692018508911,15.124170541763306,19.65719509124756,23.64489245414734,34.56616735458374
1.0272552967071533,2.4850564002990723,3.6993303298950195,8.74350357055664,11.522526741027832,15.167948007583618,18.998569011688232,23.57488775253296,34.72972536087036
0.8701906204223633,2.342695474624634,3.9779343605041504,8.43106198310852,11.420814752578735,16.141853094100952,20.375312566757202,23.408360719680786,33.90605902671814
0.9109225273132324,2.144861936569214,3.3771023750305176,8.592206239700317,11.48600172996521,14.987765550613403,19.385486602783203,25.1193208694458,33.94826626777649
0.9816443920135498,2.1542670726776123,3.5521466732025146,8.509221315383911,11.628002643585205,14.858725309371948,19.51885724067688,24.59690284729004,33.040677547454834
0.9135477542877197,1.9848182201385498,3.598935127258301,8.426192045211792,11.95021915435791,15.61799430847168,19.14977765083313,24.9086012840271,33.726221799850464
0.8438146114349365,2.085094690322876,3.576479434967041,8.02638292312622,10.94989275932312,15.241029739379883,19.54108452796936,25.013059616088867,32.88712763786316
0.8688364028930664,2.180650472640991,3.596482276916504,8.604441404342651,11.128572225570679,15.095619678497314,18.685551643371582,25.701316595077515,34.67376089096069
0.8366460800170898,2.0028655529022217,3.4182419776916504,8.652469873428345,11.459563732147217,15.82176685333252,19.65123200416565,25.646445989608765,32.839797019958496
0.8481349945068359,2.1892166137695312,3.4449169635772705,8.446269035339355,11.985326766967773,14.893955945968628,19.16008687019348,25.730915069580078,32.78506088256836
0.9579446315765381,2.219630718231201,3.8541202545166016,8.73814082145691,11.567262411117554,15.347697496414185,19.48912000656128,25.423113346099854,33.98611545562744
0.9066781997680664,2.083017349243164,3.588240385055542,8.700974941253662,11.331908941268921,15.30103087425232,19.335123538970947,24.858865976333618,33.50572729110718
0.9746880531311035,2.2279622554779053,4.053081274032593,8.730259418487549,11.936275482177734,15.455094814300537,19.327856302261353,25.628153085708618,32.99900460243225
0.8930683135986328,2.11864972114563,3.7024741172790527,8.344207286834717,11.746542930603027,15.088658809661865,19.43919348716736,25.075968742370605,33.45540761947632
0.856304407119751,2.086219310760498,3.6951184272766113,8.34026026725769,11.830772161483765,14.942124843597412,18.9264714717865,25.86812424659729,33.858113288879395
0.8727133274078369,2.0580806732177734,3.7264912128448486,8.894163370132446,12.052824258804321,14.960201978683472,19.055952310562134,25.78945779800415,32.88934874534607
0.8761835098266602,2.228577136993408,3.4152305126190186,8.170103073120117,11.750890493392944,15.093035459518433,19.775015115737915,25.31575870513916,33.19825792312622
0.8753976821899414,2.107287645339966,3.421565532684326,8.800944566726685,12.298073530197144,15.2935950756073,21.14627766609192,25.744181156158447,33.204394578933716
0.8760173320770264,2.026487350463867,3.8564090728759766,9.433102130889893,11.492626905441284,15.162604808807373,20.254459142684937,25.106750011444092,33.05475950241089
0.8822832107543945,1.976762294769287,3.630432605743408,9.591833591461182,11.193760871887207,15.723769426345825,21.67072629928589,25.18179702758789,33.00413703918457


================================================
FILE: demo/lsh_exec_times.csv
================================================
1000,2000,3000,4000,5000,6000,7000,8000,10000
0.023784637451171875,0.08341550827026367,0.15126347541809082,0.33096837997436523,0.4911954402923584,0.515388011932373,0.690263032913208,0.9283351898193359,1.3463435173034668
0.03152871131896973,0.10286164283752441,0.15152525901794434,0.3354344367980957,0.3960435390472412,0.5925371646881104,0.8368349075317383,0.7942116260528564,1.0759913921356201
0.029398441314697266,0.09281778335571289,0.14369988441467285,0.33049607276916504,0.41103291511535645,0.5936400890350342,0.6794371604919434,0.8794887065887451,1.1260664463043213
0.03680753707885742,0.0930788516998291,0.1341261863708496,0.29814672470092773,0.4796781539916992,0.5141637325286865,0.6429910659790039,0.8827624320983887,1.1970088481903076
0.04126882553100586,0.058476924896240234,0.1401991844177246,0.2537209987640381,0.4159054756164551,0.572455883026123,0.6010398864746094,0.8189992904663086,1.230595350265503
0.03595137596130371,0.08831262588500977,0.11515688896179199,0.28495216369628906,0.378218412399292,0.5232009887695312,0.8514833450317383,0.9663751125335693,1.2129037380218506
0.04663252830505371,0.09302330017089844,0.13613414764404297,0.26587772369384766,0.45294785499572754,0.5516300201416016,0.6722183227539062,0.8564984798431396,1.0223979949951172
0.019811153411865234,0.06587505340576172,0.12438511848449707,0.3414583206176758,0.4270482063293457,0.6210315227508545,0.6754748821258545,0.9284231662750244,1.2596681118011475
0.032245635986328125,0.07642269134521484,0.13181066513061523,0.2506859302520752,0.46262025833129883,0.5530803203582764,0.6569054126739502,0.8058040142059326,1.119572639465332
0.04127001762390137,0.0894927978515625,0.13326454162597656,0.3099794387817383,0.42696142196655273,0.5447602272033691,0.6076810359954834,0.9187808036804199,1.1909763813018799
0.03832602500915527,0.08265113830566406,0.11252927780151367,0.2969646453857422,0.4582955837249756,0.5136613845825195,0.5503523349761963,0.9396407604217529,1.115455150604248
0.03364896774291992,0.07457470893859863,0.211561918258667,0.3259725570678711,0.41351938247680664,0.5627808570861816,0.7018880844116211,0.8254928588867188,1.072371482849121
0.06022810935974121,0.10837268829345703,0.14995479583740234,0.3509492874145508,0.45815014839172363,0.5309557914733887,0.5962278842926025,0.8092830181121826,1.0732884407043457
0.03121781349182129,0.07620978355407715,0.1468205451965332,0.33223938941955566,0.37134265899658203,0.5799720287322998,0.6643342971801758,0.8704044818878174,1.0601885318756104
0.03435707092285156,0.06217193603515625,0.13513994216918945,0.3275594711303711,0.35704994201660156,0.47503113746643066,0.644827127456665,0.8879103660583496,1.2539973258972168
0.03912711143493652,0.08903884887695312,0.12429618835449219,0.25405335426330566,0.5083765983581543,0.5449907779693604,0.7324926853179932,0.8082249164581299,1.013197660446167
0.03409695625305176,0.06731986999511719,0.1349468231201172,0.30829381942749023,0.42665600776672363,0.5346364974975586,0.6694869995117188,1.0194809436798096,1.2974910736083984
0.04721331596374512,0.07333040237426758,0.13997197151184082,0.28172802925109863,0.40156102180480957,0.6314287185668945,0.8593540191650391,0.8863487243652344,1.231271743774414
0.03896808624267578,0.08074784278869629,0.14035487174987793,0.3700544834136963,0.40504980087280273,0.581559419631958,0.7495737075805664,0.8647575378417969,1.1768264770507812
0.03593301773071289,0.08340692520141602,0.13669967651367188,0.28766536712646484,0.38652944564819336,0.5708668231964111,0.7684242725372314,0.9323217868804932,1.3054776191711426


================================================
FILE: demo/random_sample_compiler.py
================================================
import os
import sys

# Make the repository root importable when this demo script is run directly
# (it lives one level below the package root).
current_dir = os.path.dirname(os.path.abspath(__file__))
package_dir = os.path.dirname(current_dir)
sys.path.insert(0, package_dir)

import argparse
import datetime

import numpy as np
from csstuning.compiler.compiler_benchmark import CompilerBenchmarkBase

from transopt.Benchmark import construct_test_suits
from transopt.KnowledgeBase.kb_builder import construct_knowledgebase
from transopt.KnowledgeBase.TaskDataHandler import OptTaskDataHandler
from optimizer.construct_optimizer import get_optimizer

# Pin numeric libraries to a single thread so benchmark measurements are not
# perturbed by BLAS/OpenMP worker threads.
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"

def run_experiments(tasks, args):
    """Assemble the knowledge base, test suites, optimizer and data handler
    from *args*, then drive the optimization over the configured *tasks*."""
    knowledge_base = construct_knowledgebase(args)
    suites = construct_test_suits(tasks, args.seed)
    chosen_optimizer = get_optimizer(args)
    handler = OptTaskDataHandler(knowledge_base, args)
    chosen_optimizer.optimize(suites, handler)


def split_into_segments(lst, n):
    """Split *lst* into *n* contiguous segments of near-equal length.

    The first ``len(lst) % n`` segments receive one extra element, so the
    segment lengths differ by at most one.  Segments may be empty when
    ``n > len(lst)``.
    """
    base, extra = divmod(len(lst), n)
    segments = []
    start = 0
    for idx in range(n):
        size = base + (1 if idx < extra else 0)
        segments.append(lst[start:start + size])
        start += size
    return segments


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "samples_num",
        type=int,
        help="Number of samples to be collected for each workload",
    )
    parser.add_argument(
        "--split_index",
        type=int,
        help="Index for splitting the workload segments",
        default=0,
    )
    args = parser.parse_args()
    split_index = args.split_index
    samples_num = args.samples_num

    available_workloads = CompilerBenchmarkBase.AVAILABLE_WORKLOADS
    # Workloads whose samples were already collected; they are removed from
    # the candidate pool below.
    collected_workloads = [
        "cbench-automotive-susan-c",
        "cbench-automotive-bitcount",
        "cbench-security-rijndael",
        "cbench-consumer-tiff2rgba",
        "cbench-telecom-adpcm-d",
        "cbench-consumer-tiff2bw",
        "cbench-telecom-adpcm-c",
        "cbench-consumer-tiff2dither",
        "cbench-telecom-gsm",
        "cbench-automotive-susan-e",
        "cbench-security-sha",
        "cbench-network-patricia",
        "cbench-telecom-crc32",
        "cbench-security-pgp",
        "cbench-consumer-mad",
        "cbench-automotive-qsort1",
        "polybench-cholesky",
        "polybench-trisolv",
        "polybench-adi",
        "polybench-symm",
        "polybench-gesummv",
        "polybench-gemver",
        "polybench-durbin",
        "polybench-atax",
        "polybench-fdtd-apml",
        "polybench-jacobi-1d-imper",
        "polybench-bicg",
        "polybench-syr2k",
        "polybench-mvt",
        "polybench-lu",
        "polybench-3mm",
    ]
    # NOTE(review): set subtraction does not preserve the original workload
    # ordering, so segment contents can vary between Python runs.
    available_workloads = list(set(available_workloads) - set(collected_workloads))

    # Partition the remaining workloads into 10 segments so separate
    # processes/machines can each sample one segment (selected by index).
    split_workloads = split_into_segments(available_workloads, 10)

    if split_index >= len(split_workloads):
        raise IndexError("split index out of range")

    workloads = split_workloads[split_index]

    tasks = {
        "GCC": {"budget": samples_num, "workloads": workloads},
        # "LLVM": {"budget": samples_num, "workloads": workloads},
    }

    # Get date and set exp name
    date = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    exp_name = f"sampling_compiler_{date}"

    # NOTE(review): `args` is rebound here, discarding the parsed CLI
    # namespace (its values were saved above as split_index/samples_num).
    args = argparse.Namespace(
        seed=0,
        optimizer="ParEGO",
        init_number=2,
        init_method="random",
        exp_path=f"{package_dir}/../experiment_results",
        exp_name=exp_name,
        verbose=True,
        normalize="norm",
        source_num=2,
        selector="None",
        save_mode=1,
        load_mode=False,
        acquisition_func="LCB",
    )

    run_experiments(tasks, args)


================================================
FILE: demo/random_sample_dbms.py
================================================
# Standard-library imports must come first: the original ordering used
# `os` and `sys` below before importing them, which raises NameError at
# startup.
import argparse
import datetime
import os
import sys

# Make the repository root importable when this demo script is run directly.
current_dir = os.path.dirname(os.path.abspath(__file__))
package_dir = os.path.dirname(current_dir)
sys.path.insert(0, package_dir)

import numpy as np
from csstuning.dbms.dbms_benchmark import MySQLBenchmark

from transopt.benchmark import instantiate_problems
from transopt.KnowledgeBase.kb_builder import construct_knowledgebase
from transopt.KnowledgeBase.TaskDataHandler import OptTaskDataHandler
from optimizer.construct_optimizer import get_optimizer

# Pin numeric libraries to a single thread so benchmark timings are stable.
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"


def run_experiments(tasks, args):
    """Build the knowledge base, problem instances, optimizer and data
    handler from *args*, then run the optimization over *tasks*."""
    knowledge_base = construct_knowledgebase(args)
    problems = instantiate_problems(tasks, args.seed)
    chosen_optimizer = get_optimizer(args)
    handler = OptTaskDataHandler(knowledge_base, args)
    chosen_optimizer.optimize(problems, handler)


def split_into_segments(lst, n):
    """Divide *lst* into *n* contiguous chunks whose lengths differ by at
    most one; the earliest chunks absorb the remainder elements."""
    quotient, remainder = divmod(len(lst), n)
    chunks = []
    offset = 0
    for position in range(n):
        length = quotient + (position < remainder)
        chunks.append(lst[offset:offset + length])
        offset += length
    return chunks


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--samples_num", type=int, help="Number of samples to be collected for each workload", default=6
    )
    parser.add_argument(
        "--split_index", type=int, help="Index for splitting the workload segments", default=0
    )
    args = parser.parse_args()
    split_index = args.split_index
    samples_num = args.samples_num

    # Partition the DBMS workloads into 6 segments so separate processes can
    # each sample one segment (selected via --split_index).
    available_workloads = MySQLBenchmark.AVAILABLE_WORKLOADS
    split_workloads = split_into_segments(available_workloads, 6)

    if split_index >= len(split_workloads):
        raise IndexError("split index out of range")

    workloads = split_workloads[split_index]

    tasks = {
        "DBMS": {"budget": samples_num, "workloads": workloads},
    }

    # Get date and set exp name
    date = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    exp_name = f"sampling_dbms_{date}"

    # NOTE(review): `args` is rebound here, discarding the parsed CLI
    # namespace (its values were saved above as split_index/samples_num).
    args = argparse.Namespace(
        seed=0,
        optimizer="ParEGO",
        init_number=2,
        init_method="random",
        exp_path=f"{package_dir}/../experiment_results",
        exp_name=exp_name,
        verbose=True,
        normalize="norm",
        source_num=2,
        selector="None",
        save_mode=1,
        load_mode=False,
        acquisition_func="LCB",
    )

    run_experiments(tasks, args)


================================================
FILE: demo/sampling/random_sample_compiler.py
================================================
import os
import sys
from pathlib import Path

# Make the repository root (two levels up) importable when this sampling
# script is run directly.
current_dir = Path(__file__).resolve().parent
package_dir = current_dir.parent.parent
sys.path.insert(0, str(package_dir))

import argparse
import datetime

import numpy as np
from csstuning.compiler.compiler_benchmark import CompilerBenchmarkBase

from transopt.benchmark import instantiate_problems
from transopt.KnowledgeBase.kb_builder import construct_knowledgebase
from transopt.KnowledgeBase.TransferDataHandler import OptTaskDataHandler
from optimizer.construct_optimizer import get_optimizer

# Pin numeric libraries to a single thread so benchmark measurements are not
# perturbed by BLAS/OpenMP worker threads.
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"


def run_experiments(tasks, args):
    """Wire up the knowledge base, problem suite, optimizer and data handler
    described by *args*, then execute the optimization over *tasks*."""
    knowledge_base = construct_knowledgebase(args)
    problem_suite = instantiate_problems(tasks, args.seed)
    selected_optimizer = get_optimizer(args)
    handler = OptTaskDataHandler(knowledge_base, args)
    selected_optimizer.optimize(problem_suite, handler)


def split_into_segments(lst, n):
    """Return *lst* cut into *n* consecutive pieces of near-equal size.

    When ``len(lst)`` is not divisible by *n*, the leading pieces are one
    element longer; trailing pieces may be empty if ``n > len(lst)``.
    """
    per_piece, leftover = divmod(len(lst), n)
    pieces = []
    cursor = 0
    for index in range(n):
        step = per_piece + (1 if index < leftover else 0)
        pieces.append(lst[cursor:cursor + step])
        cursor += step
    return pieces


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--samples_num",
        type=int,
        help="Number of samples to be collected for each workload",
        default=5000,
    )
    parser.add_argument(
        "--split_index",
        type=int,
        help="Index for splitting the workload segments",
        default=0,
    )
    args = parser.parse_args()
    split_index = args.split_index
    samples_num = args.samples_num

    available_workloads = CompilerBenchmarkBase.AVAILABLE_WORKLOADS
    # Manually-selected subset kept for reference; uncomment to restrict
    # sampling to these workloads only.
    # available_workloads = [
    #     "polybench-jacobi-2d-imper",
    #     "polybench-dynprog",
    #     "polybench-medley-reg-detect",
    #     "polybench-trmm",
    #     "polybench-gemm",
    #     "cbench-automotive-susan-s",
    #     "cbench-network-dijkstra",
    #     "cbench-consumer-jpeg-c",
    #     "cbench-bzip2",
    # ]

    # Partition the workloads into 10 segments so separate processes can
    # each sample one segment (selected via --split_index).
    split_workloads = split_into_segments(available_workloads, 10)

    if split_index >= len(split_workloads):
        raise IndexError("split index out of range")

    workloads = split_workloads[split_index]

    tasks = {
        # "GCC": {"budget": samples_num, "workloads": workloads},
        "LLVM": {"budget": samples_num, "workloads": workloads},
    }

    # Get date and set exp name
    date = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    exp_name = f"sampling_compiler_{date}"

    # NOTE(review): `args` is rebound here, discarding the parsed CLI
    # namespace (its values were saved above as split_index/samples_num).
    args = argparse.Namespace(
        seed=0,
        optimizer="ParEGO",
        init_number=100,
        init_method="random",
        exp_path=f"{package_dir}/experiment_results",
        exp_name=exp_name,
        verbose=True,
        normalize="norm",
        source_num=2,
        selector=None,
        save_mode=1,
        load_mode=False,
        acquisition_func="LCB",
    )

    run_experiments(tasks, args)


================================================
FILE: demo/sampling/random_sample_dbms.py
================================================
import os
import sys
from pathlib import Path

# Make the repository root (two levels up) importable when this sampling
# script is run directly.
current_dir = Path(__file__).resolve().parent
package_dir = current_dir.parent.parent
sys.path.insert(0, str(package_dir))

import argparse
import datetime
# NOTE(review): `os` and `sys` are imported a second time below; harmless
# (imports are idempotent) but the duplicates could be dropped.
import os
import sys

import numpy as np
from csstuning.dbms.dbms_benchmark import MySQLBenchmark

from transopt.benchmark import instantiate_problems
from transopt.KnowledgeBase.kb_builder import construct_knowledgebase
from transopt.KnowledgeBase.TransferDataHandler import OptTaskDataHandler
from optimizer.construct_optimizer import get_optimizer

# Pin numeric libraries to a single thread so benchmark timings are stable.
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"


def run_experiments(tasks, args):
    """Construct the knowledge base, benchmark problems, optimizer and data
    handler from *args*, then run the optimization loop over *tasks*."""
    knowledge_base = construct_knowledgebase(args)
    benchmark_problems = instantiate_problems(tasks, args.seed)
    tuned_optimizer = get_optimizer(args)
    handler = OptTaskDataHandler(knowledge_base, args)
    tuned_optimizer.optimize(benchmark_problems, handler)


def split_into_segments(lst, n):
    """Split *lst* into *n* consecutive slices of balanced length.

    The first ``len(lst) % n`` slices get one extra element each, so no two
    slices differ in length by more than one.
    """
    span, surplus = divmod(len(lst), n)
    result = []
    begin = 0
    for slot in range(n):
        end = begin + span + (1 if slot < surplus else 0)
        result.append(lst[begin:end])
        begin = end
    return result


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--samples_num", type=int, help="Number of samples to be collected for each workload", default=10
    )
    parser.add_argument(
        "--split_index", type=int, help="Index for splitting the workload segments", default=0
    )
    args = parser.parse_args()
    split_index = args.split_index
    samples_num = args.samples_num

    # Partition the DBMS workloads into 6 segments so separate processes can
    # each sample one segment (selected via --split_index).
    available_workloads = MySQLBenchmark.AVAILABLE_WORKLOADS
    split_workloads = split_into_segments(available_workloads, 6)

    if split_index >= len(split_workloads):
        raise IndexError("split index out of range")

    workloads = split_workloads[split_index]

    tasks = {
        "DBMS": {"budget": samples_num, "workloads": workloads},
    }

    # Get date and set exp name
    date = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    exp_name = f"sampling_dbms_{date}"

    # NOTE(review): `args` is rebound here, discarding the parsed CLI
    # namespace (its values were saved above as split_index/samples_num).
    args = argparse.Namespace(
        seed=0,
        optimizer="ParEGO",
        init_number=10,
        init_method="random",
        exp_path=f"{package_dir}/experiment_results",
        exp_name=exp_name,
        verbose=True,
        normalize="norm",
        source_num=2,
        selector=None,
        save_mode=1,
        load_mode=False,
        acquisition_func="LCB",
    )

    run_experiments(tasks, args)


================================================
FILE: docs/Makefile
================================================
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS    ?=
SPHINXBUILD   ?= sphinx-build
SOURCEDIR     = source
BUILDDIR      = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
# E.g. `make html` or `make latexpdf` delegate the target name to sphinx-build.
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)


================================================
FILE: docs/make.bat
================================================
@ECHO OFF

REM Change into the directory containing this script so relative paths work.
pushd %~dp0

REM Command file for Sphinx documentation

REM Fall back to the sphinx-build on PATH unless SPHINXBUILD is preset.
if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build

REM Probe that sphinx-build exists; errorlevel 9009 means command not found.
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.https://www.sphinx-doc.org/
	exit /b 1
)

if "%1" == "" goto help

REM Delegate the requested target (e.g. "html") to sphinx-build's make mode.
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%

:end
popd


================================================
FILE: docs/source/_static/custom.css
================================================
/* Hide the secondary (right-hand) sidebar entirely. */
.bd-sidebar-secondary {
    display: none !important;
}

/* Let the article content area span the full page width. */
.bd-main .bd-content {
    flex-grow: 1;
    max-width: 100%;
    width: 100%;
}

.bd-article-container {
    max-width: 100% !important;
    width: 100% !important;
}


.bd-article {
    max-width: 100% !important;
    width: 100% !important;
}

.bd-sidebar-primary {
    flex: 0 0 250px; /* narrower primary sidebar */
    max-width: 250px;
    padding: 0;
}

.bd-page-width {
    max-width: 100% !important;
    padding-left: 0 !important;
    padding-right: 0 !important;
}

================================================
FILE: docs/source/conf.py
================================================
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import os
import sys
from os.path import dirname

# Directory containing this conf.py; prepended to sys.path so autodoc can
# import helper modules placed alongside the documentation sources.
SOURCE = os.path.dirname(os.path.realpath(__file__))

sys.path.insert(0, SOURCE)

project = 'TransOPT: Transfer Optimization System for Bayesian Optimization Using Transfer Learning'
copyright = '2024, Peili Mao'
author = 'Peili Mao'
release = '0.1.0'

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx_rtd_theme',
    'sphinxcontrib.bibtex',
    'sphinx_togglebutton',
    'sphinx.ext.mathjax',
    'sphinx.ext.autosummary',
    # 'numpydoc',
    # 'nbsphinx',
    'sphinx.ext.intersphinx',
    'sphinx.ext.coverage',
    # 'matplotlib.sphinxext.plot_directive',
]

templates_path = ['_templates']
exclude_patterns = []

# Bibliography consumed by sphinxcontrib.bibtex.
bibtex_bibfiles = ['usage/TOS.bib']

# Logo path relative to the docs source directory.
# Fixed: the path previously contained a stray double slash ("_static//figures").
html_logo = "_static/figures/transopt_logo.jpg"
# html_favicon = '_static/favicon.ico'

# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

html_theme = 'sphinx_book_theme'
html_static_path = ['_static']
html_css_files = [
    'custom.css',
]

master_doc = 'index'
================================================
FILE: docs/source/development/api_reference.rst
================================================
API Reference
=============

This section provides a detailed reference for the TransOPT API, including descriptions of all available endpoints and methods.

.. automodule:: transopt
   :members:

================================================
FILE: docs/source/development/architecture.rst
================================================
Architecture Overview
======================

This section provides an overview of the architecture of the TransOPT software, illustrating the key components and workflows involved in its operation.

System Architecture
-------------------

The following diagram provides a high-level view of the entire system architecture of TransOPT, showing the interaction between various components.

.. image:: ../images/system_architecture.pdf
   :alt: System Architecture Diagram
   :width: 600px
   :align: center

Workflow
--------

The workflow for using TransOPT is illustrated below. This diagram shows the typical steps a user would follow when working with TransOPT, from defining the problem to obtaining the optimization results.

.. image:: ../images/workflow.pdf
   :alt: TransOPT Workflow
   :width: 600px
   :align: center

Optimizer Architecture
----------------------

TransOPT includes different optimization algorithms. The following diagram highlights the difference between the standard Bayesian Optimization (BO) and Transfer Learning for Bayesian Optimization (TLBO).

BO vs. Transfer BO
~~~~~~~~~~~~~~~~~~

.. image:: ../images/bo_vs_tlbo.pdf
   :alt: BO vs. Transfer BO
   :width: 600px
   :align: center

Optimizer Workflow
~~~~~~~~~~~~~~~~~~

The diagram below illustrates the workflow of the optimizer component within TransOPT, showing how it integrates with other system components.

.. image:: ../images/optimizer.pdf
   :alt: Optimizer Workflow
   :width: 600px
   :align: center

Data Management
---------------

Data management is a critical component of TransOPT, handling the storage, retrieval, and processing of data required for optimization tasks. The following diagram provides an overview of how data is managed within the system.

.. image:: ../images/data_management.pdf
   :alt: Data Management Overview
   :width: 600px
   :align: center

Conclusion
----------

The architecture of TransOPT is designed to be modular and flexible, allowing for easy integration of new algorithms and data management strategies. This overview provides a snapshot of the system's key components and their interactions, setting the stage for more detailed exploration in subsequent sections.



================================================
FILE: docs/source/faq.rst
================================================
FAQ
================================

This section addresses common questions and issues that users might encounter when using TransOPT.

How do I submit the error information to the maintainer?
--------------------------------------------------------
Click on the `Submit error` button on the bottom right corner of the dashboard page. Type in the error information and click on the `Submit` button, the error 
information will be sent to the maintainer.



How do I report a bug?
----------------------
1. If the web UI fails to build with an OpenSSL-related error, enable the
   Node.js legacy OpenSSL provider before building:

   ::

     $ export NODE_OPTIONS=--openssl-legacy-provider



================================================
FILE: docs/source/home/feature.html
================================================
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.8.1/css/all.css"
      integrity="sha384-50oBUHEmvpQ+1lW4y57PTFmhCaXp0ML5d60M1M7uH2+nqUivzIebhndOJK28anvf" crossorigin="anonymous">

<style>
    #wrapper h4 {
        margin: 5px 0px 15px 0px;
        font-weight: 400;
    }

    .entry {
        height: 100%;
        width: 100%;
    }

    .border {
        border: 1px solid #DCDCDC;
    }

    .icon {
        margin-top: 5px;
    }

    .entry:hover {
        background: #DCDCDC;
        cursor: pointer;
    }
</style>

<div id="wrapper">
    <div class="row row-eq-height">
        <div class="col-12 col-lg-6 p-1">
            <div class="entry border p-3">
                <div class="d-flex flex-row">
                    <div class="icon col-2">
                        <i class="fas fa-cogs fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading">Composite Algorithm Design</p>
                        <a>Search Space:</a> <a href="usage/algorithms.html"> Automated prune search space, ...</a><br>
                        <a>Initialization:</a> <a href="usage/algorithms.html"> Meta-learn based initialization, EA-based initialization ...</a><br>
                        <a>Surrogate Model:</a> <a href="usage/algorithms.html"> MTGP, RGPE, Neural Process, ...</a><br>
                        <a>Acquisition Function:</a> <a href="usage/algorithms.html"> Transfer acquisition function, RL acquisition function, ...</a><br>
                    </div>
                </div>
            </div>
        </div>

        <div class="col-12 col-lg-6 p-1">
            <div class="entry border p-3">
                <div class="d-flex flex-row">
                    <div class="icon col-2">
                        <i class="fas fa-database fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading"><a>Robust Data Management</a></p>
                        <a>Embedded Database:</a> <a href="usage/algorithms.html"> Utilizes SQLite as an embedded database, enabling seamless data management.</a><br>
                        <a>Integration with External Datasets:</a> <a href="usage/algorithms.html"> Allows integration of public datasets to enhance analysis.</a><br>
                        <a>Data Retrieval:</a> <a href="usage/algorithms.html"> Locality-sensitive hashing based data retrieval approach.</a><br>
                    </div>
                </div>
            </div>
        </div>
    </div>

    <div class="row row-eq-height">
        <div class="col-12 col-lg-6 p-1">
            <div class="entry border p-3">
                <div class="d-flex flex-row">
                    <div class="icon col-2">
                        <i class="fas fa-tasks fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading"><a>Benchmark Problems</a></p>
                        <a>Synthetic Problems:</a> <a href="usage/problems.html"> Ackley, ...</a><br>
                        <a>Configurable Software Tuning:</a> <a href="usage/problems.html">GCC, LLVM, MySQL, Hadoop, ...</a><br>
                        <a>Hyperparameter Optimization:</a> <a href="usage/problems.html"> ResNet, DenseNet, AlexNet, ...</a><br>
                        <a>Protein Inverse Folding:</a> <a href="usage/problems.html">Protein Data Bank, CATH, ...</a><br>
                        <a>RNA Inverse Design:</a> <a href="usage/problems.html">Eterna100, RNAStralign, Rfam-learn, ...</a><br>
                    </div>
                </div>
            </div>
        </div>

        <div class="col-12 col-lg-6 p-1">
            <div class="entry border p-3">
                <div class="d-flex flex-row">
                    <div class="icon col-2">
                        <i class="fas fa-globe fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading"><a>Web User Interface</a></p>
                        <a>Intuitive Navigation:</a> <a href="usage/results.html"> Clear menus and sidebars for easy access to features.</a><br>
                        <a>Interactive Data Visualization:</a> <a href="usage/results.html"> Real-time charts and graphs for data results.</a><br>
                        <a>Data Upload Functionality:</a> <a href="usage/results.html"> Direct upload of datasets for transfer and optimization.</a><br>
                        <a>LLM-powered-chatbot:</a> <a href="usage/results.html">Enables natural language interaction.</a><br>
                    </div>
                </div>
            </div>
        </div>
    </div>

    <div class="row row-eq-height">
        <div class="col-12 col-lg-6 p-1">
            <div class="entry border p-3">
                <div class="d-flex flex-row">
                    <div class="icon col-2">
                        <i class="fas fa-chart-bar fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading"><a>Results Analysis</a></p>
                        <a>Performance Indicator:</a> <a href="usage/results.html"> MAE, GC-content, Max RSS, ...</a><br>
                        <a>Statistical Measures:</a> <a href="usage/results.html"> Wilcoxon signed-rank test, Scott-Knott test, Critical difference, ...</a><br>
                        <a>Visualization:</a> <a href="usage/results.html"> Optimization trajectory, Multidimensional scaling, ...</a><br>

                    </div>
                </div>
            </div>
        </div>

        <div class="col-12 col-lg-6 p-1">
            <div class="entry border p-3">
                <div class="d-flex flex-row">
                    <div class="icon col-2">
                        <i class="fas fa-puzzle-piece fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading">More features are coming soon</p>
                        <p>...</p>
                    </div>
                </div>
            </div>
        </div>
    </div>
</div>


================================================
FILE: docs/source/home/guide.html
================================================
<style>

    .zoom:hover {
        transform: scale(1.07);
    }
</style>


<div class="container">
    <div class="row row-eq-height">


        <div class="col-md d-flex my-1 mx-2 overflow-hidden">
            <div class="card">
                <a href="installation.html"><img class="card-img-top w-100 zoom"
                                                          src="_static/figures/giant.png"
                                                          alt="Icon Getting Started"></a>
                <div class="card-body">
                    <p class="card-text"><b>Getting Started:</b> The key steps in using TransOPT: Installation, Algorithm Selection, Benchmarking Problems, Visualization, and Data Management for effective transfer learning optimization.</p>
                </div>
            </div>
        </div>


        <div class="col-md d-flex my-1 mx-2 overflow-hidden">
            <div class="card">
                <a href="https://colalab.ai/" data-toggle="modal" data-target="#colalab"><img class="card-img-top w-100 zoom"
                                                                              src="_static/figures/colalab.png"
                                                                              alt="Icon colalab"></a>
                <div class="card-body">
                    <p class="card-text"><b>About Us:</b> COLA laboratory is working in computational/artificial intelligence, multi-objective optimization and decision-making, operational research...</p>
                </div>
            </div>
        </div>

        <div class="col-md d-flex my-1 mx-2 overflow-hidden">
            <div class="card">
                <a href=""><img class="card-img-top w-100 zoom"
                                                                              src="_static/figures/research.png"
                                                                              alt="Research with this Package"></a>
                <div class="card-body">
                    <p class="card-text"><b>News:</b> 
                        Our system has been applied in various studies, including protein design, hyperparameter optimization... </p>
                </div>
            </div>
        </div>


    </div>
</div>

================================================
FILE: docs/source/home/portfolio.html
================================================
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.8.1/css/all.css"
      integrity="sha384-50oBUHEmvpQ+1lW4y57PTFmhCaXp0ML5d60M1M7uH2+nqUivzIebhndOJK28anvf" crossorigin="anonymous">

<style>

    #wrapper h4 {
        margin: 5px 0px 15px 0px;
        font-weight: 400;
    }

    .entry {
        height: 100%;
        width: 100%;
    }


    .border {
        border: 1px solid #DCDCDC;
    }

    .icon {
        margin-top: 5px;
    }

    .entry:hover {
        background: #DCDCDC;
        cursor: pointer;
    }




</style>

<div id="wrapper">

    <div class="row row-eq-height">

        <div class="col-12 col-lg-6 p-1" onclick="location.href='interface/index.html';">
            <div class="entry border p-3">
                <div class="d-flex flex-row">
                    <div class="icon col-2">
                        <i class="fas fa-bullhorn fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading"><a href="interface/index.html">Interface</a></p>

                        <b>Function:</b>
                        <a href="interface/minimize.html">minimize</a>
                        <p style="margin-bottom:0.4em;"></p><b>Parameters:</b>
                        <a href="problems/index.html">Problem</a>,
                        <a href="algorithms/index.html">Algorithm</a>,
                        <a href="interface/termination.html">Termination</a>
                        <p style="margin-bottom:0.4em;"></p>

                        <b>Optionals:</b>
                        <a href="interface/callback.html">Callback</a>,
                        <a href="interface/display.html">Display</a>,
                        <a href="interface/minimize.html">...</a>

                        <p style="margin-bottom:0.4em;"></p>

                        <b>Returns:</b> <a href="interface/result.html">Result</a>

                        <br>
                        <p style="margin-bottom:0.7em;"></p>

                        <b>Related:</b>
                        <a href="algorithms/usage.html">Ask and Tell</a><img class="new-flag" src="_static/new-flag.svg">,
                        <a href="misc/checkpoint.html">Checkpoints</a>
                    </div>
                </div>
            </div>
        </div>

        <div class="col-12 col-lg-6 p-1" onclick="location.href='problems/index.html';">
            <div class="entry border p-3">
                <div class="d-flex flex-row">

                    <div class="icon col-2">
                        <i class="fas fa-chess fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading"><a href="problems/index.html">Problems</a></p>

                        <b>Single-objective:</b>
                        <a href="problems/single/ackley.html">Ackley</a>,
                        <a href="problems/single/griewank.html">Griewank</a>,
                        <a href="problems/single/rastrigin.html">Rastrigin</a>,
                        <a href="problems/single/rosenbrock.html">Rosenbrock</a>,
                        <a href="problems/single/zakharov.html">Zakharov</a>,
                        <a href="problems/index.html#Single-Objective">...</a>
                        <br>
                        <p style="margin-bottom:0.4em;"></p>


                        <b>Multi-objective:</b>
                        <a href="problems/multi/bnh.html">BNH</a>,
                        <a href="problems/multi/osy.html">OSY</a>,
                        <a href="problems/multi/tnk.html">TNK</a>,
                        <a href="problems/multi/truss2d.html">Truss2d</a>,
                        <a href="problems/multi/welded_beam.html">Welded Beam</a>,
                        <a href="problems/multi/zdt.html">ZDT</a>,
                        <a href="problems/index.html#Multi-Objective">...</a>
                        <br>
                        <p style="margin-bottom:0.4em;"></p>


                        <b>Many-objective:</b>
                        <a href="problems/many/dtlz.html">DTLZ</a>,
                        WFG
                        <br>
                        <p style="margin-bottom:0.7em;"></p>

                        <b>Constrained:</b>
                        CTP,
                        <a href="problems/constrained/dascmop.html">DASCMOP</a>,
                        <a href="problems/constrained/modact.html">MODAct</a>,
                        <a href="problems/constrained/mw.html">MW</a>,
                        CDTLZ
                        <br>
                        <p style="margin-bottom:0.4em;"></p>

                        <b>Dynamic:</b>
                        <a href="problems/dynamic/df.html">DF</a>
                        
                        <br>
                        <p style="margin-bottom:0.7em;"></p>
                        

                        <b>Related:</b>
                        <a href="problems/definition.html">Problem Definition</a>,
                        <a href="gradients/index.html">Gradients</a>,
                        <a href="problems/parallelization.html">Parallelization</a>
                    </div>
                </div>
            </div>
        </div>


    </div>


    <div class="row row-eq-height">

        <div class="col-12 col-lg-6 p-1"onclick="location.href='algorithms/index.html';">
            <div class="entry border p-3">
                <div class="d-flex flex-row">

                    <div class="icon col-2">
                        <i class="fas fa-search fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading"><a href="algorithms/index.html">Algorithms</a></p>

                        <b>Single-objective:</b>
                        <a href="algorithms/soo/ga.html">GA</a>,
                        <a href="algorithms/soo/de.html">DE</a>,
                        <a href="algorithms/soo/pso.html">PSO</a>,
                        <a href="algorithms/soo/nelder.html">Nelder Mead</a>,
                        <a href="algorithms/soo/pattern.html">Pattern Search</a>,
                        <a href="algorithms/soo/brkga.html">BRKGA</a>,
                        <a href="algorithms/soo/es.html">ES</a>,
                        <a href="algorithms/soo/sres.html">SRES</a>,
                        <a href="algorithms/soo/isres.html">ISRES</a>,
                        <a href="algorithms/soo/cmaes.html">CMA-ES</a>,
                        <a href="algorithms/soo/g3pcx.html">G3PCX</a><img class="new-flag" src="_static/new-flag.svg">

                        <p style="margin-bottom:0.4em;"></p>
                        <p style="margin-bottom:0.4em;"></p>

                        <b>Multi-objective:</b>
                        <a href="algorithms/moo/nsga2.html">NSGA-II</a>,
                        <a href="algorithms/moo/rnsga2.html">R-NSGA-II</a>

                        <br>
                        <p style="margin-bottom:0.4em;"></p>

                        <b>Many-objective:</b>
                        <a href="algorithms/moo/nsga3.html">NSGA-III</a>,
                        <a href="algorithms/moo/rnsga3.html">R-NSGA-III</a>,
                        <a href="algorithms/moo/unsga3.html">U-NSGA-III</a>,
                        <a href="algorithms/moo/moead.html">MOEA/D</a>,
                        <a href="algorithms/moo/age.html">AGE-MOEA</a>,
                        <a href="algorithms/moo/age2.html">AGE-MOEA2</a>,
                        <a href="algorithms/moo/rvea.html">RVEA</a>,
                        <a href="algorithms/moo/sms.html">SMS-EMOA</a>
                        <br>
                        
                        <b>Dynamic:</b>
                        <a href="algorithms/moo/dnsga2.html">D-NSGA-II</a>,
                        <a href="algorithms/moo/kgb.html">KGB</a><img class="new-flag" src="_static/new-flag.svg">
                        <br>
                        <p style="margin-bottom:0.7em;"></p>

                        <b>Related:</b>
                        <a href="misc/reference_directions.html">Reference Directions</a>,
                        <a href="constraints/index.html">Constraints</a>,
                        <a href="misc/convergence.html">Convergence</a>,
                        <a href="algorithms/hyperparameters.html">Hyperparameters</a><img class="new-flag" src="_static/new-flag.svg">


                    </div>
                </div>
            </div>
        </div>

        <div class="col-12 col-lg-6 p-1" onclick="location.href='customization/index.html';">
            <div class="entry border p-3">
                <div class="d-flex flex-row">
                    <div class="icon col-2">
                        <i class="fas fa-book-open fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading"><a href="customization/index.html">Customization</a></p>

                        <b>Variable Types:</b>
                        <a href="customization/binary.html">Binary</a>,
                        <a href="customization/discrete.html">Discrete</a>,
                        <a href="customization/permutation.html">Permutation</a>,
                        <a href="customization/mixed.html">Mixed</a><img class="new-flag" src="_static/new-flag.svg">,
                        <a href="customization/custom.html">Custom</a>

                        <br>
                        <p style="margin-bottom:0.4em;"></p>

                        <b>Examples:</b>
                        <a href="customization/initialization.html">Biased Initialization</a>,
                        <a href="customization/permutation.html#Traveling-Salesman-Problem-(TSP)">Traveling Salesman</a>
                    </div>

                </div>
            </div>
        </div>


    </div>


    <div class="row row-eq-height">

        <div class="col-12 col-lg-6 p-1"onclick="location.href='operators/index.html';">
            <div class="entry border p-3">
                <div class="d-flex flex-row">


                    <div class="icon col-2">
                        <i class="fas fa-tools fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading"><a href="operators/index.html">Operators</a></p>


                        <a href="operators/sampling.html">Sampling:</a>
                        Random, LHS
                        <br>
                        <a href="operators/selection.html">Selection:</a>
                        Random, Binary Tournament

                        <br>
                        <a href="operators/crossover.html">Crossover:</a>
                        SBX, UX, HUX, DE Point, Exponential, OX, ERX
                        <br>

                        <a href="operators/mutation.html">Mutation:</a>
                        Polynomial, Bitflip, Inverse Mutation
                        <br>
                        <a href="operators/repair.html">Repair</a>

                    </div>

                </div>
            </div>
        </div>

        <div class="col-12 col-lg-6 p-1" onclick="location.href='visualization/index.html';">
            <div class="entry border p-3">
                <div class="d-flex flex-row">

                    <div class="icon col-2">
                        <i class="fas fa-chart-line fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading"><a href="visualization/index.html">Visualization</a></p>

                        <a href="visualization/scatter.html">Scatter Plot (2D/3D/ND)</a>,
                        <a href="visualization/pcp.html">Parallel Coordinate Plot (PCP) </a>,
                        <a href="visualization/radviz.html">Radviz</a>,
                        <a href="visualization/star.html">Star Coordinates</a>,
                        <a href="visualization/heatmap.html">Heatmap</a>,
                        <a href="visualization/petal.html">Petal Diagram</a>,
                        <a href="visualization/radar.html">Spider Web / Radar</a>,
                        <a href="visualization/video.html">Video</a>

                    </div>
                </div>
            </div>
        </div>


    </div>


    <div class="row row-eq-height">

        <div class="col-12 col-lg-6 p-1" onclick="location.href='mcdm/index.html';">
            <div class="entry border p-3">
                <div class="d-flex flex-row">

                    <div class="icon col-2">
                        <i class="fas fa-balance-scale fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading"><a href="mcdm/index.html">Multi-Criteria Decision Making</a></p>

                        <a href="mcdm/index.html#nb-compromise">Compromise Programming</a>,
                        <a href="mcdm/index.html#nb-pseudo-weights">Pseudo Weights</a>,
                        <a href="mcdm/index.html#nb-high-tradeoff">High Trade-off Points</a>
                    </div>
                </div>
            </div>
        </div>


        <div class="col-12 col-lg-6 p-1" onclick="location.href='misc/indicators.html';">
            <div class="entry border p-3">
                <div class="d-flex flex-row">
                    <div class="icon col-2">
                        <i class="fas fa-medal fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading"><a href="misc/indicators.html">Performance Indicator</a></p>

                        <a href="misc/indicators.html#nb-gd">GD</a>,
                        <a href="misc/indicators.html#nb-gd-plus">GD+</a>,
                        <a href="misc/indicators.html#nb-igd">IGD</a>,
                        <a href="misc/indicators.html#nb-igd-plus">IGD+</a>,
                        <a href="misc/indicators.html#nb-hv">Hypervolume</a>,
                        <a href="misc/kktpm.html">KKTPM</a>
                    </div>
                </div>
            </div>
        </div>

    </div>

        <div class="row row-eq-height">

        <div class="col-12 col-lg-6 p-1" onclick="location.href='misc/decomposition.html';">
            <div class="entry border p-3">
                <div class="d-flex flex-row">
                    <div class="icon col-2">
                        <i class="fas fa-layer-group fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading"><a href="misc/decomposition.html">Decomposition</a></p>

                        <a href="misc/decomposition.html#nb-weighted-sum">Weighted-Sum</a>,
                        <a href="misc/decomposition.html#nb-asf">ASF</a>,
                        <a href="misc/decomposition.html#nb-aasf">AASF</a>,
                        <a href="misc/decomposition.html#nb-tchebi">Tchebysheff</a>,
                        <a href="misc/decomposition.html#nb-pbi">PBI</a>
                    </div>
                </div>
            </div>
        </div>


        <div class="col-12 col-lg-6 p-1" onclick="location.href='case_studies/index.html';">
            <div class="entry border p-3">
                <div class="d-flex flex-row">
                    <div class="icon col-2">
                        <i class="fas fa-business-time fa-2x"></i>
                    </div>
                    <div class="desc col-10">
                        <p class="portfolio-heading"><a href="case_studies/index.html">Case Studies</a></p>
                        <a href="case_studies/subset_selection.html">Subset Selection</a>,
                        <a href="case_studies/portfolio_allocation.html">Portfolio Allocation</a><img class="new-flag" src="_static/new-flag.svg">
                        

                    </div>
                </div>
            </div>
        </div>

    </div>





</div>

================================================
FILE: docs/source/index.rst
================================================
.. TransOPT documentation master file, created by
   sphinx-quickstart on Mon Aug 19 16:00:09 2024.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

.. _home:


TRANSOPT: Transfer Optimization System for Bayesian Optimization Using Transfer Learning
========================================================================================
TransOPT is an open-source software platform designed to facilitate the design, benchmarking, and application of transfer learning for Bayesian optimization (TLBO) algorithms through a modular, data-centric framework.

.. raw:: html
   :file: home/guide.html


Video Demonstration
********************************************************************************
Watch the following video for a quick overview of TransOPT's capabilities:

.. raw:: html

   <iframe width="560" height="315" src="https://www.youtube.com/embed/8l25_6fArxY?si=7WunSY06lrQNbkkb" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe>


Features
****************************************************
Download .txt
gitextract__y7m6kto/

├── .gitignore
├── LICENSE
├── MANIFEST.in
├── README.md
├── demo/
│   ├── analysis.py
│   ├── causal_analysis.py
│   ├── comparison/
│   │   ├── analysis_hypervolume.py
│   │   ├── analysis_plot.py
│   │   ├── experiment_gcc.py
│   │   ├── experiment_llvm.py
│   │   ├── features_by_workload_gcc.json
│   │   ├── features_by_workload_gcc_extra.json
│   │   ├── features_by_workload_llvm.json
│   │   ├── plot.py
│   │   ├── plot_samples_dbms.py
│   │   └── start_server.py
│   ├── correlation_analysis.py
│   ├── experiment_lsh_validity.py
│   ├── experiments.py
│   ├── importances/
│   │   ├── cal_relationship.py
│   │   ├── draw_obj_heatmap.py
│   │   └── get_feature_importances.py
│   ├── jacard_exec_times.csv
│   ├── lsh_exec_times.csv
│   ├── optimize_profile.prof
│   ├── random_sample_compiler.py
│   ├── random_sample_dbms.py
│   └── sampling/
│       ├── random_sample_compiler.py
│       └── random_sample_dbms.py
├── docs/
│   ├── Makefile
│   ├── make.bat
│   └── source/
│       ├── _static/
│       │   └── custom.css
│       ├── conf.py
│       ├── development/
│       │   ├── api_reference.rst
│       │   └── architecture.rst
│       ├── faq.rst
│       ├── home/
│       │   ├── feature.html
│       │   ├── guide.html
│       │   └── portfolio.html
│       ├── index.rst
│       ├── installation.rst
│       ├── quickstart.rst
│       └── usage/
│           ├── TOS.bib
│           ├── algorithms.rst
│           ├── cli.rst
│           ├── data_manage.rst
│           ├── problems.rst
│           ├── results.rst
│           └── visualization.rst
├── extra_requirements/
│   ├── analysis.json
│   └── remote.json
├── requirements.txt
├── resources/
│   └── docker/
│       └── absolut_image/
│           ├── Dockerfile
│           └── prepare_antigen.sh
├── scripts/
│   ├── init_csstuning.sh
│   └── init_docker.sh
├── setup.py
├── tests/
│   ├── EXP_NSGA2.py
│   ├── EXP_NSGA2_restart.py
│   ├── EXP_bohb.py
│   ├── EXP_grid.py
│   ├── EXP_hebo.py
│   ├── EXP_hyperopt.py
│   ├── EXP_random.py
│   ├── EXP_smac.py
│   ├── EXP_tpe.py
│   └── data_analysis.py
├── transopt/
│   ├── ResultAnalysis/
│   │   ├── AnalysisBase.py
│   │   ├── AnalysisPipeline.py
│   │   ├── AnalysisReport.py
│   │   ├── CasualAnalysis.py
│   │   ├── CompileTex.py
│   │   ├── CorrelationAnalysis.py
│   │   ├── MakeGif.py
│   │   ├── PFAnalysis.py
│   │   ├── PlotAnalysis.py
│   │   ├── ReportNote.py
│   │   ├── TableAnalysis.py
│   │   ├── TableToLatex.py
│   │   ├── TrackOptimization.py
│   │   └── __init__.py
│   ├── __init__.py
│   ├── agent/
│   │   ├── __init__.py
│   │   ├── app.py
│   │   ├── chat/
│   │   │   ├── openai_chat.py
│   │   │   ├── prompt
│   │   │   ├── prompt.bak
│   │   │   └── yaml_generator.py
│   │   ├── config.py
│   │   ├── registry.py
│   │   ├── run_cli.py
│   │   ├── services.py
│   │   └── testood.py
│   ├── analysis/
│   │   ├── compile_tex.py
│   │   ├── effect_size.py
│   │   ├── mds.py
│   │   ├── parameter_network.py
│   │   ├── table.py
│   │   └── table_to_latex.py
│   ├── benchmark/
│   │   ├── CPD/
│   │   │   └── __init__.py
│   │   ├── CSSTuning/
│   │   │   ├── Compiler.py
│   │   │   ├── DBMS.py
│   │   │   └── __init__.py
│   │   ├── DownloadBench/
│   │   │   └── references
│   │   ├── HBOROB/
│   │   │   ├── algorithms.py
│   │   │   ├── hporobust.py
│   │   │   └── test.py
│   │   ├── HPO/
│   │   │   ├── HPO.py
│   │   │   ├── HPOAdaBoost.py
│   │   │   ├── HPOSVM.py
│   │   │   ├── HPOXGBoost.py
│   │   │   ├── __init__.py
│   │   │   ├── algorithms.py
│   │   │   ├── augmentation.py
│   │   │   ├── datasets.py
│   │   │   ├── fast_data_loader.py
│   │   │   ├── hparams_registry.py
│   │   │   ├── image_options.py
│   │   │   ├── misc.py
│   │   │   ├── networks.py
│   │   │   ├── test_model.py
│   │   │   ├── visualization.py
│   │   │   └── wide_resnet.py
│   │   ├── HPOB/
│   │   │   ├── HpobBench.py
│   │   │   └── plot.py
│   │   ├── HPOOOD/
│   │   │   ├── algorithms.py
│   │   │   ├── collect_results.py
│   │   │   ├── download.py
│   │   │   ├── fast_data_loader.py
│   │   │   ├── hparams_registry.py
│   │   │   ├── hpoood.py
│   │   │   ├── misc.py
│   │   │   ├── networks.py
│   │   │   ├── ooddatasets.py
│   │   │   └── wide_resnet.py
│   │   ├── RL/
│   │   │   ├── LunarlanderBenchmark.py
│   │   │   └── __init__.py
│   │   ├── __init__.py
│   │   ├── instantiate_problems.py
│   │   ├── problem_base/
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   ├── non_tab_problem.py
│   │   │   ├── tab_problem.py
│   │   │   └── transfer_problem.py
│   │   └── synthetic/
│   │       ├── MovingPeakBenchmark.py
│   │       ├── MultiObjBenchmark.py
│   │       ├── __init__.py
│   │       └── synthetic_problems.py
│   ├── datamanager/
│   │   ├── __init__.py
│   │   ├── database.py
│   │   ├── lsh.py
│   │   ├── manager.py
│   │   └── minhash.py
│   ├── optimizer/
│   │   ├── MultiObjOptimizer/
│   │   │   ├── CauMOpt.py
│   │   │   ├── IEIPV.py
│   │   │   ├── MoeadEGO.py
│   │   │   ├── ParEGO.py
│   │   │   ├── SMSEGO.py
│   │   │   └── __init__.py
│   │   ├── SingleObjOptimizer/
│   │   │   ├── KrigingOptimizer.py
│   │   │   ├── LFL.py
│   │   │   ├── MetaLearningOptimizer.py
│   │   │   ├── MultitaskOptimizer.py
│   │   │   ├── PROptimizer.py
│   │   │   ├── RBFNOptimizer.py
│   │   │   ├── RGPEOptimizer.py
│   │   │   ├── TPEOptimizer.py
│   │   │   ├── VizerOptimizer.py
│   │   │   └── __init__.py
│   │   ├── __init__.py
│   │   ├── acquisition_function/
│   │   │   ├── ConformalLCB.py
│   │   │   ├── __init__.py
│   │   │   ├── acf_base.py
│   │   │   ├── ei.py
│   │   │   ├── get_acf.py
│   │   │   ├── lcb.py
│   │   │   ├── model_manage/
│   │   │   │   ├── CMAESBest.py
│   │   │   │   ├── CMAESGeneration.py
│   │   │   │   ├── CMAESPreSelect.py
│   │   │   │   ├── DEBest.py
│   │   │   │   ├── DEGeneration.py
│   │   │   │   ├── DEPreSelect.py
│   │   │   │   ├── GABest.py
│   │   │   │   ├── GAGeneration.py
│   │   │   │   ├── GAPreSelect.py
│   │   │   │   ├── PSOBest.py
│   │   │   │   ├── PSOGeneration.py
│   │   │   │   └── PSOPreSelect.py
│   │   │   ├── moeadego.py
│   │   │   ├── pi.py
│   │   │   ├── piei.py
│   │   │   ├── sequential.py
│   │   │   ├── smsego.py
│   │   │   └── taf.py
│   │   ├── construct_optimizer.py
│   │   ├── model/
│   │   │   ├── HyperBO.py
│   │   │   ├── __init__.py
│   │   │   ├── bohb.py
│   │   │   ├── deepkernel.py
│   │   │   ├── dyhpo.py
│   │   │   ├── get_model.py
│   │   │   ├── gp.py
│   │   │   ├── hebo.py
│   │   │   ├── mhgp.py
│   │   │   ├── mlp.py
│   │   │   ├── model_base.py
│   │   │   ├── moeadego.py
│   │   │   ├── mtgp.py
│   │   │   ├── neuralprocess.py
│   │   │   ├── parego.py
│   │   │   ├── pr.py
│   │   │   ├── rbfn.py
│   │   │   ├── rf.py
│   │   │   ├── rgpe.py
│   │   │   ├── sgpt.py
│   │   │   ├── smsego.py
│   │   │   └── utils.py
│   │   ├── normalizer/
│   │   │   ├── __init__.py
│   │   │   ├── normalizer_base.py
│   │   │   └── standerd.py
│   │   ├── optimizer_base/
│   │   │   ├── EvoOptimizerBase.py
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   └── bo.py
│   │   ├── pretrain/
│   │   │   ├── __init__.py
│   │   │   ├── deepkernelpretrain.py
│   │   │   ├── get_pretrain.py
│   │   │   ├── hyper_bo.py
│   │   │   └── pretrain_base.py
│   │   ├── refiner/
│   │   │   ├── __init__.py
│   │   │   ├── box.py
│   │   │   ├── ellipse.py
│   │   │   ├── get_refiner.py
│   │   │   ├── prune.py
│   │   │   └── refiner_base.py
│   │   ├── sampler/
│   │   │   ├── __init__.py
│   │   │   ├── get_sampler.py
│   │   │   ├── gradient.py
│   │   │   ├── grid.py
│   │   │   ├── lhs.py
│   │   │   ├── lhs_BAK.py
│   │   │   ├── meta.py
│   │   │   ├── random.py
│   │   │   ├── sampler_base.py
│   │   │   └── sobel.py
│   │   └── selector/
│   │       ├── __init__.py
│   │       ├── fuzzy_selector.py
│   │       ├── lsh_selector.py
│   │       └── selector_base.py
│   ├── remote/
│   │   ├── __init__.py
│   │   ├── celeryconfig.py
│   │   ├── experiment_client.py
│   │   ├── experiment_server.py
│   │   ├── experiment_tasks.py
│   │   └── server_manager.sh
│   ├── space/
│   │   ├── __init__.py
│   │   ├── fidelity_space.py
│   │   ├── search_space.py
│   │   └── variable.py
│   └── utils/
│       ├── Initialization.py
│       ├── Kernel.py
│       ├── Normalization.py
│       ├── Prior.py
│       ├── Read.py
│       ├── Visualization.py
│       ├── __init__.py
│       ├── check.py
│       ├── encoding.py
│       ├── hypervolume.py
│       ├── log.py
│       ├── openml_data_manager.py
│       ├── pareto.py
│       ├── path.py
│       ├── plot.py
│       ├── profile.py
│       ├── rng_helper.py
│       ├── serialization.py
│       ├── sk.py
│       └── weights.py
└── webui/
    ├── .gitignore
    ├── LICENSE.md
    ├── package.json
    ├── public/
    │   ├── index.html
    │   ├── manifest.json
    │   ├── robots.txt
    │   └── transopt.psd
    ├── src/
    │   ├── App.css
    │   ├── App.js
    │   ├── App.test.js
    │   ├── app/
    │   │   ├── auth.js
    │   │   ├── init.js
    │   │   └── store.js
    │   ├── components/
    │   │   ├── CalendarView/
    │   │   │   ├── index.js
    │   │   │   └── util.js
    │   │   ├── Cards/
    │   │   │   └── TitleCard.js
    │   │   ├── Input/
    │   │   │   ├── InputText.js
    │   │   │   ├── SearchBar.js
    │   │   │   ├── SelectBox.js
    │   │   │   ├── TextAreaInput.js
    │   │   │   └── ToogleInput.js
    │   │   └── Typography/
    │   │       ├── ErrorText.js
    │   │       ├── HelperText.js
    │   │       ├── Subtitle.js
    │   │       └── Title.js
    │   ├── containers/
    │   │   ├── Header.js
    │   │   ├── Layout.js
    │   │   ├── LeftSidebar.js
    │   │   ├── ModalLayout.js
    │   │   ├── PageContent.js
    │   │   ├── RightSidebar.js
    │   │   ├── SidebarSubmenu.js
    │   │   └── SuspenseContent.js
    │   ├── features/
    │   │   ├── algorithm/
    │   │   │   ├── components/
    │   │   │   │   ├── OptTable.js
    │   │   │   │   └── SelectPlugin.js
    │   │   │   └── index.js
    │   │   ├── analytics/
    │   │   │   ├── charts/
    │   │   │   │   ├── Box.js
    │   │   │   │   ├── Trajectory.js
    │   │   │   │   └── my_theme.json
    │   │   │   ├── components/
    │   │   │   │   ├── LineChart.js
    │   │   │   │   └── SelectTask.js
    │   │   │   └── index.js
    │   │   ├── calendar/
    │   │   │   ├── CalendarEventsBodyRightDrawer.js
    │   │   │   └── index.js
    │   │   ├── charts/
    │   │   │   ├── components/
    │   │   │   │   ├── BarChart.js
    │   │   │   │   ├── DoughnutChart.js
    │   │   │   │   ├── LineChart.js
    │   │   │   │   ├── PieChart.js
    │   │   │   │   ├── ScatterChart.js
    │   │   │   │   └── StackBarChart.js
    │   │   │   └── index.js
    │   │   ├── chatbot/
    │   │   │   ├── ChatBot.js
    │   │   │   └── components/
    │   │   │       ├── ChatUI.js
    │   │   │       └── chatui-theme.css
    │   │   ├── common/
    │   │   │   ├── components/
    │   │   │   │   ├── ConfirmationModalBody.js
    │   │   │   │   └── NotificationBodyRightDrawer.js
    │   │   │   ├── headerSlice.js
    │   │   │   ├── modalSlice.js
    │   │   │   └── rightDrawerSlice.js
    │   │   ├── dashboard/
    │   │   │   ├── components/
    │   │   │   │   ├── AmountStats.js
    │   │   │   │   ├── BarChart.js
    │   │   │   │   ├── DashboardStats.js
    │   │   │   │   ├── DashboardTopBar.js
    │   │   │   │   ├── DoughnutChart.js
    │   │   │   │   ├── Footprint.js
    │   │   │   │   ├── Importance.js
    │   │   │   │   ├── LineChart.js
    │   │   │   │   ├── PageStats.js
    │   │   │   │   ├── ScatterChart.js
    │   │   │   │   ├── Trajectory.js
    │   │   │   │   ├── UserChannels.js
    │   │   │   │   └── my_theme.json
    │   │   │   └── index.js
    │   │   ├── documentation/
    │   │   │   ├── DocComponents.js
    │   │   │   ├── DocFeatures.js
    │   │   │   ├── DocGettingStarted.js
    │   │   │   └── components/
    │   │   │       ├── DocComponentsContent.js
    │   │   │       ├── DocComponentsNav.js
    │   │   │       ├── FeaturesContent.js
    │   │   │       ├── FeaturesNav.js
    │   │   │       ├── GettingStartedContent.js
    │   │   │       └── GettingStartedNav.js
    │   │   ├── experiment/
    │   │   │   ├── components/
    │   │   │   │   ├── DashboardStats.js
    │   │   │   │   ├── SearchData.js
    │   │   │   │   ├── SelectAlgorithm.js
    │   │   │   │   ├── SelectData.js
    │   │   │   │   └── SelectTask.js
    │   │   │   └── index.js
    │   │   ├── integration/
    │   │   │   └── index.js
    │   │   ├── leads/
    │   │   │   ├── components/
    │   │   │   │   └── AddLeadModalBody.js
    │   │   │   ├── index.js
    │   │   │   └── leadSlice.js
    │   │   ├── run/
    │   │   │   ├── components/
    │   │   │   │   ├── DataTable.js
    │   │   │   │   ├── OptTable.js
    │   │   │   │   ├── Run.js
    │   │   │   │   ├── RunProgress.js
    │   │   │   │   └── TaskTable.js
    │   │   │   └── index.js
    │   │   ├── seldata/
    │   │   │   ├── components/
    │   │   │   │   ├── DataTable.js
    │   │   │   │   ├── SearchData.js
    │   │   │   │   ├── SelectData.css
    │   │   │   │   └── SelectData.js
    │   │   │   └── index.js
    │   │   ├── settings/
    │   │   │   ├── billing/
    │   │   │   │   └── index.js
    │   │   │   ├── profilesettings/
    │   │   │   │   └── index.js
    │   │   │   └── team/
    │   │   │       └── index.js
    │   │   ├── transactions/
    │   │   │   └── index.js
    │   │   └── user/
    │   │       ├── ForgotPassword.js
    │   │       ├── LandingIntro.js
    │   │       ├── Login.js
    │   │       ├── Register.js
    │   │       └── components/
    │   │           └── TemplatePointers.js
    │   ├── index.css
    │   ├── index.js
    │   ├── pages/
    │   │   ├── GettingStarted.js
    │   │   └── protected/
    │   │       ├── 404.js
    │   │       ├── Algorithm.js
    │   │       ├── Analytics.js
    │   │       ├── Bills.js
    │   │       ├── Blank.js
    │   │       ├── Calendar.js
    │   │       ├── Charts.js
    │   │       ├── ChatOpt.js
    │   │       ├── Dashboard.js
    │   │       ├── Experiment.js
    │   │       ├── Integration.js
    │   │       ├── Leads.js
    │   │       ├── ProfileSettings.js
    │   │       ├── Run.js
    │   │       ├── Seldata.js
    │   │       ├── Team.js
    │   │       ├── Transactions.js
    │   │       └── Welcome.js
    │   ├── reportWebVitals.js
    │   ├── routes/
    │   │   ├── index.js
    │   │   └── sidebar.js
    │   ├── setupTests.js
    │   └── utils/
    │       ├── dummyData.js
    │       └── globalConstantUtil.js
    └── tailwind.config.js
Download .txt
SYMBOL INDEX (2302 symbols across 310 files)

FILE: demo/analysis.py
  function run_analysis (line 9) | def run_analysis(Exper_folder:Path, tasks, methods, seeds, args):

FILE: demo/causal_analysis.py
  function run_analysis (line 9) | def run_analysis(Exper_folder:Path, tasks, methods, seeds, args):

FILE: demo/comparison/analysis_hypervolume.py
  function load_and_prepare_data (line 28) | def load_and_prepare_data(file_path, objectives):
  function load_data (line 55) | def load_data(workload, algorithm, seed):
  function collect_all_data (line 63) | def collect_all_data(workload):
  function calculate_mean_hypervolume (line 75) | def calculate_mean_hypervolume(
  function calculate_hypervolumes (line 111) | def calculate_hypervolumes(
  function analyze_and_compare_algorithms (line 139) | def analyze_and_compare_algorithms(workload_results):
  function matrix_to_latex (line 182) | def matrix_to_latex(analysis_results, caption):
  function load_workloads (line 277) | def load_workloads():

FILE: demo/comparison/analysis_plot.py
  function load_and_prepare_data (line 37) | def load_and_prepare_data(file_path):
  function load_data (line 65) | def load_data(workload, algorithm, seed):
  function collect_all_data (line 73) | def collect_all_data(workload):
  function dynamic_plot (line 85) | def dynamic_plot(workload, algorithm, seed):
  function save_individual_frames (line 233) | def save_individual_frames(workload, algorithm, seed):
  function load_workloads (line 264) | def load_workloads():
  function plot_pareto_front (line 311) | def plot_pareto_front(workload):
  function plot_all (line 345) | def plot_all(workload, algorithm=""):
  function plot_all_2d (line 377) | def plot_all_2d(workload, algorithm=""):

FILE: demo/comparison/experiment_gcc.py
  function execute_tasks (line 25) | def execute_tasks(tasks, args):
  function split_into_segments (line 33) | def split_into_segments(lst, n):
  function get_workloads (line 39) | def get_workloads(workloads, split_index, total_splits=10):
  function load_features (line 47) | def load_features():
  function configure_experiment (line 53) | def configure_experiment(workload, features, seed, optimizer_name, exp_p...
  function main (line 77) | def main(optimizers = [], repeat=5, budget=500, init_number=21):
  function main_debug (line 122) | def main_debug(repeat=1, budget=20, init_number=10):

FILE: demo/comparison/experiment_llvm.py
  function execute_tasks (line 25) | def execute_tasks(tasks, args):
  function split_into_segments (line 33) | def split_into_segments(lst, n):
  function get_workloads (line 39) | def get_workloads(workloads, split_index, total_splits=10):
  function load_features (line 47) | def load_features(file_path):
  function configure_experiment (line 52) | def configure_experiment(workload, features, seed, optimizer_name, exp_p...
  function main (line 75) | def main(optimizers = [], repeat=5, budget=500, init_number=21):
  function main_debug (line 103) | def main_debug(repeat=1, budget=20, init_number=10):

FILE: demo/comparison/plot.py
  function create_plots (line 17) | def create_plots(data, file_name, format="pdf"):
  function load_data (line 59) | def load_data(workload, algorithm, seed):
  function load_and_prepare_data (line 67) | def load_and_prepare_data(file_path):
  function get_data_ranges (line 91) | def get_data_ranges(data):
  function rescale_data (line 97) | def rescale_data(data, original_range, target_range):
  function map_data_to_mysql_ranges (line 104) | def map_data_to_mysql_ranges(data, gcc_llvm_range, mysql_range):
  function invert_mapping (line 113) | def invert_mapping(value, min_val, max_val):

FILE: demo/comparison/plot_samples_dbms.py
  function load_and_prepare_data (line 27) | def load_and_prepare_data(file_path):
  function load_data (line 58) | def load_data(workload):
  function plot_pareto_front (line 64) | def plot_pareto_front(workload):
  function plot_all (line 84) | def plot_all(workload):

FILE: demo/comparison/start_server.py
  function generate_index_html (line 14) | def generate_index_html():
  function start_http_server (line 31) | def start_http_server():

FILE: demo/correlation_analysis.py
  function run_analysis (line 9) | def run_analysis(Exper_folder:Path, tasks, methods, seeds, args):

FILE: demo/experiment_lsh_validity.py
  function generate_random_string (line 50) | def generate_random_string(length):
  function generate_dataset_config (line 55) | def generate_dataset_config():
  function create_experiment_datasets (line 98) | def create_experiment_datasets(dm, num_datasets):
  function get_shingles (line 104) | def get_shingles(text, ngram=5):
  function cal_jacard_similarity (line 108) | def cal_jacard_similarity(cfg1, cfg2):
  function validity_experiment (line 118) | def validity_experiment(n_tables, num_replicates=3, jacard_lower_bound =...

FILE: demo/experiments.py
  function run_experiments (line 21) | def run_experiments(tasks, args):

FILE: demo/importances/cal_relationship.py
  function load_and_prepare_data (line 31) | def load_and_prepare_data(file_path, objectives):
  function cal_dcor (line 56) | def cal_dcor(df, objectives):
  function cal_spearman_corr (line 69) | def cal_spearman_corr(df, objectives):
  function cal_pearson_corr (line 86) | def cal_pearson_corr(df, objectives):
  function generate_grid_plot (line 103) | def generate_grid_plot(dcor_values_dict):

FILE: demo/importances/draw_obj_heatmap.py
  function generate_grid_plot_combine (line 22) | def generate_grid_plot_combine(dcor_values_dicts):
  function generate_grid_plot (line 49) | def generate_grid_plot(dcor_values_dict, file_name):

FILE: demo/importances/get_feature_importances.py
  function load_and_prepare_data (line 29) | def load_and_prepare_data(file_path, objectives):
  function calculate_feature_importances (line 54) | def calculate_feature_importances(df, objective):
  function aggregate_importances (line 71) | def aggregate_importances(importances_list):
  function combine_and_rank_features (line 80) | def combine_and_rank_features(importances_list):
  function get_top_combined_features (line 94) | def get_top_combined_features(common_features, combined_ranked, total_fe...
  function find_common_features (line 113) | def find_common_features(importances_list):
  function train_and_evaluate_model (line 139) | def train_and_evaluate_model(
  function get_workloads_improved (line 171) | def get_workloads_improved():
  function get_features_for_exp (line 250) | def get_features_for_exp(workloads, repetitions=5):

FILE: demo/random_sample_compiler.py
  function run_experiments (line 24) | def run_experiments(tasks, args):
  function split_into_segments (line 32) | def split_into_segments(lst, n):

FILE: demo/random_sample_dbms.py
  function run_experiments (line 23) | def run_experiments(tasks, args):
  function split_into_segments (line 31) | def split_into_segments(lst, n):

FILE: demo/sampling/random_sample_compiler.py
  function run_experiments (line 25) | def run_experiments(tasks, args):
  function split_into_segments (line 33) | def split_into_segments(lst, n):

FILE: demo/sampling/random_sample_dbms.py
  function run_experiments (line 27) | def run_experiments(tasks, args):
  function split_into_segments (line 35) | def split_into_segments(lst, n):

FILE: setup.py
  function get_extra_requirements (line 6) | def get_extra_requirements(folder='./extra_requirements'):
  function build_docker_image (line 26) | def build_docker_image(image_name, docker_dir):
  function init_absolut_docker (line 37) | def init_absolut_docker():

FILE: tests/EXP_NSGA2.py
  class HPOProblem (line 7) | class HPOProblem(Problem):
    method __init__ (line 8) | def __init__(self, task_name, budget_type, budget, seed, workload):
    method _evaluate (line 17) | def _evaluate(self, X, out, *args, **kwargs):

FILE: tests/EXP_NSGA2_restart.py
  class HPOProblem (line 14) | class HPOProblem(Problem):
    method __init__ (line 15) | def __init__(self, task_name, budget_type, budget, seed, workload, dat...
    method _evaluate (line 58) | def _evaluate(self, X, out, *args, **kwargs):

FILE: tests/EXP_bohb.py
  function objective (line 10) | def objective(config, budget):
  function get_configspace (line 15) | def get_configspace():

FILE: tests/EXP_grid.py
  function sobol_search (line 5) | def sobol_search(n_samples, task_name, budget_type, budget, seed, worklo...

FILE: tests/EXP_hebo.py
  function objective (line 10) | def objective(config):
  function get_design_space (line 15) | def get_design_space():

FILE: tests/EXP_hyperopt.py
  function objective (line 9) | def objective(params):
  function get_hyperopt_space (line 16) | def get_hyperopt_space():

FILE: tests/EXP_random.py
  function random_search (line 5) | def random_search(n_trials, task_name, budget_type, budget, seed, worklo...

FILE: tests/EXP_smac.py
  function objective (line 12) | def objective(configuration, seed: int = 0):
  function get_configspace (line 19) | def get_configspace():

FILE: tests/EXP_tpe.py
  class formal_obj (line 14) | class formal_obj(ObjectiveFunc):
    method __init__ (line 15) | def __init__(self, f):
    method __call__ (line 18) | def __call__(self, eval_config: Dict[str, Any]) -> Tuple[Dict[str, flo...
  function get_configspace (line 26) | def get_configspace():

FILE: tests/data_analysis.py
  function load_data (line 12) | def load_data(data_folder):
  function get_non_dominated_solutions (line 36) | def get_non_dominated_solutions(data):
  function plot_non_dominated_solutions (line 43) | def plot_non_dominated_solutions(ax, solutions, label, color):
  function compare_nsga2_results_all (line 49) | def compare_nsga2_results_all(res):
  function compare_nsga2_results (line 86) | def compare_nsga2_results(res):
  function calculate_variable_importance (line 116) | def calculate_variable_importance(res):
  function plot_variable_importance (line 150) | def plot_variable_importance(importance):
  function visualize_data_with_metrics (line 172) | def visualize_data_with_metrics(data, metric_name, output_file):

FILE: transopt/ResultAnalysis/AnalysisBase.py
  class Result (line 18) | class Result():
    method __init__ (line 22) | def __init__(self):
  class AnalysisBase (line 29) | class AnalysisBase(abc.ABC, metaclass=abc.ABCMeta):
    method __init__ (line 30) | def __init__(self, exper_folder, methods, seeds, tasks, start = 0, end...
    method read_data_from_kb (line 41) | def read_data_from_kb(self):
    method save_results_to_json (line 74) | def save_results_to_json(self, file_path):
    method load_results_from_json (line 78) | def load_results_from_json(self, file_path):
    method get_results_by_order (line 87) | def get_results_by_order(self, order=None):
    method assign_colors_to_methods (line 133) | def assign_colors_to_methods(self):
    method get_color_for_method (line 172) | def get_color_for_method(self, method:Union[List,str]):
    method get_methods (line 198) | def get_methods(self):
    method get_task_names (line 207) | def get_task_names(self):
    method get_seeds (line 216) | def get_seeds(self):

FILE: transopt/ResultAnalysis/AnalysisPipeline.py
  function analysis_pipeline (line 11) | def analysis_pipeline(Exper_folder, tasks, methods, seeds, args):

FILE: transopt/ResultAnalysis/AnalysisReport.py
  function pdf_to_png (line 6) | def pdf_to_png(pictures_path):
  function create_details_report (line 20) | def create_details_report(details_folders, save_path):
  function create_table_report (line 116) | def create_table_report(save_path):
  function create_report (line 239) | def create_report(save_path):

FILE: transopt/ResultAnalysis/CasualAnalysis.py
  function casual_analysis (line 8) | def casual_analysis(Exper_folder, tasks, methods, seeds, args):

FILE: transopt/ResultAnalysis/CompileTex.py
  function compile_tex (line 6) | def compile_tex(tex_path, output_folder):

FILE: transopt/ResultAnalysis/CorrelationAnalysis.py
  function correlation_analysis (line 9) | def correlation_analysis(Exper_folder, tasks, methods, seeds, args):
  function MutualInformation (line 19) | def MutualInformation(ab:AnalysisBase, dataset_name, method, seed):

FILE: transopt/ResultAnalysis/MakeGif.py
  function make_gif (line 4) | def make_gif(folder_path):

FILE: transopt/ResultAnalysis/PFAnalysis.py
  function parego_analysis (line 8) | def parego_analysis(Exper_folder, tasks, methods, seeds, args):

FILE: transopt/ResultAnalysis/PlotAnalysis.py
  function plot_register (line 20) | def plot_register(name):
  function plot_sk (line 31) | def plot_sk(ab:AnalysisBase, save_path:Path):
  function convergence_rate (line 106) | def convergence_rate(ab:AnalysisBase, save_path:Path, **kwargs):
  function save_traj_data (line 203) | def save_traj_data(ab, save_path):
  function traj2latex (line 259) | def traj2latex(ab: AnalysisBase, save_path: Path):
  function plot_violin (line 361) | def plot_violin(ab:AnalysisBase, save_path, **kwargs):
  function plot_box (line 435) | def plot_box(ab:AnalysisBase, save_path, **kwargs):
  function dbscan_analysis (line 516) | def dbscan_analysis(ab: AnalysisBase, save_path, **kwargs):
  function plot_heatmap (line 640) | def plot_heatmap(ab:AnalysisBase, save_path, **kwargs):

FILE: transopt/ResultAnalysis/TableAnalysis.py
  function Tabel_register (line 13) | def Tabel_register(name):
  function record_mean_std (line 22) | def record_mean_std(ab:AnalysisBase, save_path, **kwargs):
  function record_convergence_rate (line 75) | def record_convergence_rate(ab:AnalysisBase, save_path, **kwargs):

FILE: transopt/ResultAnalysis/TableToLatex.py
  function matrix_to_latex (line 5) | def matrix_to_latex(Data: Dict, col_names, row_names, caption, oder="min"):

FILE: transopt/ResultAnalysis/TrackOptimization.py
  function track_register (line 9) | def track_register(name):

FILE: transopt/agent/app.py
  function create_app (line 13) | def create_app():
  function main (line 276) | def main():

FILE: transopt/agent/chat/openai_chat.py
  function dict_to_string (line 19) | def dict_to_string(dictionary):
  class Message (line 23) | class Message(BaseModel):
    method get_content_string (line 33) | def get_content_string(self) -> str:
    method to_dict (line 41) | def to_dict(self) -> Dict[str, Any]:
    method log (line 48) | def log(self, level: Optional[str] = None):
  class OpenAIChat (line 61) | class OpenAIChat:
    method __init__ (line 64) | def __init__(
    method _get_prompt (line 85) | def _get_prompt(self):
    method client (line 93) | def client(self):
    method invoke_model (line 101) | def invoke_model(self, messages: List[Dict]) -> ChatCompletion:
    method get_response (line 335) | def get_response(self, user_input) -> str:
    method call_manager_function (line 355) | def call_manager_function(self, function_name, **kwargs):
    method _initialize_modules (line 375) | def _initialize_modules(self):
    method get_all_problems (line 384) | def get_all_problems(self):
    method get_optimization_techniques (line 419) | def get_optimization_techniques(self):
    method set_optimization_problem (line 469) | def set_optimization_problem(self, problem_name, workload, budget):
    method set_space_refiner (line 482) | def set_space_refiner(self, refiner):
    method set_sampler (line 486) | def set_sampler(self, Sampler):
    method set_pretrain (line 491) | def set_pretrain(self, Pretrain):
    method set_model (line 495) | def set_model(self, Model):
    method set_normalizer (line 499) | def set_normalizer(self, Normalizer):
    method set_metadata (line 503) | def set_metadata(self, module_name, dataset_name):
    method run_optimization (line 507) | def run_optimization(self):
    method show_configuration (line 552) | def show_configuration(self):
    method install_package (line 556) | def install_package(self, package_name: str) -> str:

FILE: transopt/agent/chat/yaml_generator.py
  function get_prompt (line 9) | def get_prompt(file_name: str) -> str:
  function parse_response (line 19) | def parse_response(response: str) -> Dict[str, Any]:
  function main (line 29) | def main():

FILE: transopt/agent/config.py
  class Config (line 2) | class Config:
  class RunningConfig (line 10) | class RunningConfig:
    method __new__ (line 14) | def __new__(cls, *args, **kwargs):
    method __init__ (line 21) | def __init__(self):
    method set_tasks (line 27) | def set_tasks(self, tasks):
    method set_optimizer (line 30) | def set_optimizer(self, optimizer):
    method set_metadata (line 36) | def set_metadata(self, metadata):

FILE: transopt/agent/registry.py
  class Registry (line 1) | class Registry:
    method __init__ (line 2) | def __init__(self):
    method register (line 5) | def register(self, name=None, cls=None, **kwargs):
    method get (line 20) | def get(self, name):
    method list_names (line 23) | def list_names(self):
    method __getitem__ (line 26) | def __getitem__(self, item):
    method __contains__ (line 29) | def __contains__(self, item):

FILE: transopt/agent/run_cli.py
  function set_task (line 11) | def set_task(services, args):
  function set_optimizer (line 24) | def set_optimizer(services, args):

FILE: transopt/agent/services.py
  class Services (line 20) | class Services:
    method __init__ (line 21) | def __init__(self, task_queue, result_queue, lock):
    method chat (line 40) | def chat(self, user_input):
    method _initialize_modules (line 44) | def _initialize_modules(self):
    method get_modules (line 60) | def get_modules(self):
    method get_comparision_modules (line 143) | def get_comparision_modules(self):
    method search_dataset (line 184) | def search_dataset(self, search_method, dataset_name, dataset_info):
    method convert_metadata (line 203) | def convert_metadata(self, conditions):
    method comparision_search (line 232) | def comparision_search(self, conditions):
    method set_metadata (line 256) | def set_metadata(self, dataset_names):
    method receive_tasks (line 260) | def receive_tasks(self, tasks_info):
    method receive_optimizer (line 280) | def receive_optimizer(self, optimizer_info):
    method receive_metadata (line 285) | def receive_metadata(self, metadata_info):
    method get_all_datasets (line 291) | def get_all_datasets(self):
    method get_experiment_datasets (line 295) | def get_experiment_datasets(self):
    method construct_dataset_info (line 299) | def construct_dataset_info(self, task_set, running_config, seed):
    method get_metadata (line 345) | def get_metadata(self, module_name):
    method save_data (line 356) | def save_data(self, dataset_name, parameters, observations, iteration):
    method remove_dataset (line 363) | def remove_dataset(self, dataset_name):
    method run_optimize (line 372) | def run_optimize(self, seeds):
    method _run_optimize_process (line 383) | def _run_optimize_process(self, seed):
    method terminate_task (line 455) | def terminate_task(self, pid):
    method update_process_info (line 474) | def update_process_info(self, pid, updates):
    method get_all_process_info (line 480) | def get_all_process_info(self):
    method get_box_plot_data (line 483) | def get_box_plot_data(self, task_names):
    method get_report_charts (line 502) | def get_report_charts(self, task_name):
    method get_report_traj (line 523) | def get_report_traj(self, task_name):
    method construct_footprint_data (line 538) | def construct_footprint_data(self, name, var_data, ranges, initial_num...
    method construct_statistic_trajectory_data (line 547) | def construct_statistic_trajectory_data(self, task_names):
    method construct_trajectory_data (line 577) | def construct_trajectory_data(self, name, obj_data, obj_type="minimize"):
    method construct_importance_data (line 611) | def construct_importance_data(self, name, var_data, obj_data, variables):
    method get_configuration (line 615) | def get_configuration(self):

FILE: transopt/agent/testood.py
  function plot_acc_scatter (line 33) | def plot_acc_scatter(train_acc, test_acc):
  function setUp (line 59) | def setUp():
  function list_pth_files (line 63) | def list_pth_files(directory):

FILE: transopt/analysis/compile_tex.py
  function compile_tex (line 6) | def compile_tex(tex_path, output_folder):

FILE: transopt/analysis/mds.py
  class FootPrint (line 7) | class FootPrint:
    method __init__ (line 8) | def __init__(self, X, range):
    method calculate_distances (line 19) | def calculate_distances(self):
    method init_distances (line 37) | def init_distances(self, config_ids, exclude_configs=False):
    method update_distances (line 54) | def update_distances(self, X, distances, config, rejection_threshold=0...
    method get_random_boundary_points (line 88) | def get_random_boundary_points(self, num_samples):
    method get_mds (line 99) | def get_mds(self):
    method plot_embedding (line 107) | def plot_embedding(self):

FILE: transopt/analysis/parameter_network.py
  function calculate_importances (line 11) | def calculate_importances(X, y):
  function calculate_interaction (line 23) | def calculate_interaction(X, y):
  function plot_network (line 57) | def plot_network(X, y, nodes):

FILE: transopt/analysis/table.py
  class Result (line 12) | class Result():
    method __init__ (line 13) | def __init__(self):
  function get_results (line 20) | def get_results(task_names):
  function record_mean_std (line 62) | def record_mean_std(task_names, save_path, **kwargs):

FILE: transopt/analysis/table_to_latex.py
  function matrix_to_latex (line 5) | def matrix_to_latex(Data: Dict, col_names, row_names, caption, oder="min"):

FILE: transopt/benchmark/CSSTuning/Compiler.py
  class GCCTuning (line 12) | class GCCTuning(NonTabularProblem):
    method __init__ (line 19) | def __init__(self, task_name, budget_type, budget, seed, workload, kno...
    method get_configuration_space (line 36) | def get_configuration_space(self):
    method get_fidelity_space (line 49) | def get_fidelity_space(self):
    method get_objectives (line 52) | def get_objectives(self) -> dict:
    method get_problem_type (line 66) | def get_problem_type(self):
    method objective_function (line 69) | def objective_function(self, configuration: dict, fidelity = None, see...
  class LLVMTuning (line 79) | class LLVMTuning(NonTabularProblem):
    method __init__ (line 86) | def __init__(self, task_name, budget_type, budget, seed, workload, kno...
    method get_configuration_space (line 103) | def get_configuration_space(self):
    method get_fidelity_space (line 116) | def get_fidelity_space(self):
    method get_objectives (line 119) | def get_objectives(self) -> dict:
    method get_problem_type (line 126) | def get_problem_type(self):
    method objective_function (line 129) | def objective_function(self, configuration: dict, fidelity = None, see...

FILE: transopt/benchmark/CSSTuning/DBMS.py
  class MySQLTuning (line 12) | class MySQLTuning(NonTabularProblem):
    method __init__ (line 19) | def __init__(self, task_name, budget_type, budget, seed, workload, kno...
    method get_configuration_space (line 30) | def get_configuration_space(self):
    method get_fidelity_space (line 47) | def get_fidelity_space(self):
    method get_objectives (line 50) | def get_objectives(self) -> dict:
    method get_problem_type (line 56) | def get_problem_type(self):
    method objective_function (line 59) | def objective_function(self, configuration: dict, fidelity = None, see...

FILE: transopt/benchmark/HBOROB/algorithms.py
  function get_algorithm_class (line 23) | def get_algorithm_class(algorithm_name):
  class Algorithm (line 29) | class Algorithm(torch.nn.Module):
    method __init__ (line 36) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 40) | def update(self, minibatches, unlabeled=None):
    method predict (line 50) | def predict(self, x):
  class MLP (line 54) | class MLP(nn.Module):
    method __init__ (line 56) | def __init__(self, n_inputs, n_outputs, hparams):
    method forward (line 66) | def forward(self, x):
  class ResNet (line 78) | class ResNet(torch.nn.Module):
    method __init__ (line 80) | def __init__(self, input_shape, hparams):
    method forward (line 111) | def forward(self, x):
    method train (line 115) | def train(self, mode=True):
    method freeze_bn (line 122) | def freeze_bn(self):
  class ERM (line 129) | class ERM(Algorithm):
    method __init__ (line 134) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 150) | def update(self, minibatches, unlabeled=None):
    method predict (line 161) | def predict(self, x):

FILE: transopt/benchmark/HPO/HPO.py
  class HPO_base (line 28) | class HPO_base(NonTabularProblem):
    method __init__ (line 49) | def __init__(
    method create_train_loaders (line 141) | def create_train_loaders(self, batch_size):
    method create_test_loaders (line 158) | def create_test_loaders(self, batch_size):
    method save_checkpoint (line 179) | def save_checkpoint(self, filename):
    method get_configuration_space (line 188) | def get_configuration_space(
    method get_fidelity_space (line 207) | def get_fidelity_space(
    method train (line 215) | def train(self, configuration: dict):
    method save_epoch_results (line 278) | def save_epoch_results(self, results):
    method evaluate_loader (line 283) | def evaluate_loader(self, loader):
    method get_score (line 295) | def get_score(self, configuration: dict):
    method objective_function (line 327) | def objective_function(
    method get_objectives (line 361) | def get_objectives(self) -> Dict:
    method get_problem_type (line 364) | def get_problem_type(self):
  class HPO_ERM (line 369) | class HPO_ERM(HPO_base):
    method __init__ (line 370) | def __init__(
  function test_all_combinations (line 393) | def test_all_combinations():

FILE: transopt/benchmark/HPO/HPOAdaBoost.py
  class XGBoostBenchmark (line 28) | class XGBoostBenchmark(NonTabularProblem):
    method __init__ (line 35) | def __init__(
    method get_data (line 97) | def get_data(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.nda...
    method shuffle_data (line 108) | def shuffle_data(self, seed=None):
    method objective_function (line 115) | def objective_function(
    method objective_function_test (line 177) | def objective_function_test(self, configuration: Union[Dict],
    method get_configuration_space (line 235) | def get_configuration_space(self, seed: Union[int, None] = None):
    method get_fidelity_space (line 264) | def get_fidelity_space(self, seed: Union[int, None] = None):
    method get_meta_information (line 291) | def get_meta_information(self) -> Dict:
    method _get_pipeline (line 312) | def _get_pipeline(self, max_depth: int, eta: float, min_child_weight: ...
    method get_objectives (line 380) | def get_objectives(self) -> Dict:
    method get_problem_type (line 383) | def get_problem_type(self):

FILE: transopt/benchmark/HPO/HPOSVM.py
  class SupportVectorMachine (line 24) | class SupportVectorMachine(NonTabularProblem):
    method __init__ (line 41) | def __init__(
    method get_data (line 91) | def get_data(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.nda...
    method shuffle_data (line 102) | def shuffle_data(self, seed=None):
    method objective_function (line 109) | def objective_function(
    method objective_function_test (line 178) | def objective_function_test(self, configuration: Union[Dict],
    method get_pipeline (line 253) | def get_pipeline(self, C: float, gamma: float) -> pipeline.Pipeline:
    method get_configuration_space (line 270) | def get_configuration_space(self, seed: Union[int, None] = None):
    method get_fidelity_space (line 293) | def get_fidelity_space(self, seed: Union[int, None] = None):
    method get_meta_information (line 323) | def get_meta_information(self):
    method get_objectives (line 349) | def get_objectives(self) -> Dict:
    method get_problem_type (line 352) | def get_problem_type(self):

FILE: transopt/benchmark/HPO/HPOXGBoost.py
  class XGBoostBenchmark (line 28) | class XGBoostBenchmark(NonTabularProblem):
    method __init__ (line 35) | def __init__(
    method get_data (line 97) | def get_data(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.nda...
    method shuffle_data (line 108) | def shuffle_data(self, seed=None):
    method objective_function (line 115) | def objective_function(
    method objective_function_test (line 177) | def objective_function_test(self, configuration: Union[Dict],
    method get_configuration_space (line 235) | def get_configuration_space(self, seed: Union[int, None] = None):
    method get_fidelity_space (line 264) | def get_fidelity_space(self, seed: Union[int, None] = None):
    method get_meta_information (line 291) | def get_meta_information(self) -> Dict:
    method _get_pipeline (line 312) | def _get_pipeline(self, max_depth: int, eta: float, min_child_weight: ...
    method get_objectives (line 380) | def get_objectives(self) -> Dict:
    method get_problem_type (line 383) | def get_problem_type(self):

FILE: transopt/benchmark/HPO/algorithms.py
  function get_algorithm_class (line 28) | def get_algorithm_class(algorithm_name):
  class Algorithm (line 34) | class Algorithm(torch.nn.Module):
    method __init__ (line 41) | def __init__(self, input_shape, num_classes, architecture, model_size,...
    method update (line 51) | def update(self, minibatches, unlabeled=None):
    method predict (line 61) | def predict(self, x):
  class ERM (line 64) | class ERM(Algorithm):
    method __init__ (line 69) | def __init__(self, input_shape, num_classes, architecture, model_size,...
    method update (line 87) | def update(self, minibatches, unlabeled=None):
    method predict (line 114) | def predict(self, x):
  class GLMNet (line 117) | class GLMNet(Algorithm):
    method __init__ (line 122) | def __init__(self, input_shape, num_classes, architecture, model_size,...
    method update (line 146) | def update(self, minibatches, unlabeled=None):
    method predict (line 170) | def predict(self, x):
  class BayesianNN (line 174) | class BayesianNN(Algorithm):
    method __init__ (line 179) | def __init__(self, input_shape, num_classes, hparams):
    method model (line 204) | def model(self, x, y=None):
    method guide (line 217) | def guide(self, x, y=None):
    method update (line 224) | def update(self, minibatches, unlabeled=None):
    method predict (line 235) | def predict(self, x):

FILE: transopt/benchmark/HPO/augmentation.py
  function mixup_data (line 7) | def mixup_data(x, y, alpha=0.3, device='cpu'):
  function mixup_criterion (line 23) | def mixup_criterion(criterion, pred, y_a, y_b, lam):
  class Cutout (line 31) | class Cutout(object):
    method __init__ (line 38) | def __init__(self, n_holes = None, length = None):
    method __call__ (line 48) | def __call__(self, img):
  class ImageNetPolicy (line 80) | class ImageNetPolicy(object):
    method __init__ (line 93) | def __init__(self, fillcolor=(128, 128, 128)):
    method __call__ (line 126) | def __call__(self, img):
    method __repr__ (line 130) | def __repr__(self):
  class CIFAR10Policy (line 134) | class CIFAR10Policy(object):
    method __init__ (line 147) | def __init__(self, fillcolor=(128, 128, 128)):
    method __call__ (line 180) | def __call__(self, img):
    method __repr__ (line 184) | def __repr__(self):
  class CIFAR10PolicyPhotometric (line 188) | class CIFAR10PolicyPhotometric(object):
    method __init__ (line 201) | def __init__(self, fillcolor=(128, 128, 128)):
    method __call__ (line 228) | def __call__(self, img):
    method __repr__ (line 232) | def __repr__(self):
  class CIFAR10PolicyGeometric (line 236) | class CIFAR10PolicyGeometric(object):
    method __init__ (line 249) | def __init__(self, fillcolor=(128, 128, 128)):
    method __call__ (line 258) | def __call__(self, img):
    method __repr__ (line 262) | def __repr__(self):
  class SVHNPolicy (line 266) | class SVHNPolicy(object):
    method __init__ (line 279) | def __init__(self, fillcolor=(128, 128, 128)):
    method __call__ (line 312) | def __call__(self, img):
    method __repr__ (line 316) | def __repr__(self):
  class SubPolicy (line 320) | class SubPolicy(object):
    method __init__ (line 321) | def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, mag...
    method __call__ (line 363) | def __call__(self, img):

FILE: transopt/benchmark/HPO/datasets.py
  function data_transform (line 26) | def data_transform(dataset_name, augmentation_name=None):
  function get_dataset_class (line 69) | def get_dataset_class(dataset_name):
  function num_environments (line 75) | def num_environments(dataset_name):
  class Dataset (line 78) | class Dataset:
    method __getitem__ (line 85) | def __getitem__(self, index):
    method __len__ (line 88) | def __len__(self):
  class RobCifar10 (line 91) | class RobCifar10(Dataset):
    method __init__ (line 92) | def __init__(self, root=None, augment=False):
    method get_available_test_set_names (line 166) | def get_available_test_set_names(self):
    method get_test_set (line 173) | def get_test_set(self, name):
    method get_all_test_sets (line 180) | def get_all_test_sets(self):
  class RobCifar100 (line 186) | class RobCifar100(Dataset):
    method __init__ (line 187) | def __init__(self, root, augment=False):
    method get_transform (line 226) | def get_transform(self, augment):
    method get_test_set (line 242) | def get_test_set(self, name):
    method get_all_test_sets (line 249) | def get_all_test_sets(self):
  class RobImageNet (line 256) | class RobImageNet(Dataset):
    method __init__ (line 257) | def __init__(self, root, augment=False):
    method get_transform (line 281) | def get_transform(self, augment):
    method get_test_set (line 301) | def get_test_set(self, name):
    method get_all_test_sets (line 308) | def get_all_test_sets(self):
  function test_dataset (line 314) | def test_dataset(dataset_name='cifar10', num_samples=5):
  function visualize_dataset_tsne (line 364) | def visualize_dataset_tsne(dataset_name='cifar10', n_samples=1000, perpl...

FILE: transopt/benchmark/HPO/fast_data_loader.py
  class _InfiniteSampler (line 5) | class _InfiniteSampler(torch.utils.data.Sampler):
    method __init__ (line 7) | def __init__(self, sampler):
    method __iter__ (line 10) | def __iter__(self):
  class InfiniteDataLoader (line 15) | class InfiniteDataLoader:
    method __init__ (line 16) | def __init__(self, dataset, batch_size, num_workers):
    method __iter__ (line 34) | def __iter__(self):
    method __len__ (line 38) | def __len__(self):
  class FastDataLoader (line 41) | class FastDataLoader:
    method __init__ (line 44) | def __init__(self, dataset, batch_size, num_workers):
    method __iter__ (line 61) | def __iter__(self):
    method __len__ (line 65) | def __len__(self):

FILE: transopt/benchmark/HPO/hparams_registry.py
  function get_hparams (line 5) | def get_hparams(algorithm, dataset, random_seed, model_size=None, archit...
  function default_hparams (line 34) | def default_hparams(algorithm, dataset, model_size='small', architecture...
  function random_hparams (line 37) | def random_hparams(algorithm, dataset, seed, model_size='small', archite...
  function get_hparam_space (line 40) | def get_hparam_space(algorithm, model_size=None, architecture='resnet'):
  function test_hparam_registry (line 77) | def test_hparam_registry():

FILE: transopt/benchmark/HPO/image_options.py
  class ShearX (line 5) | class ShearX(object):
    method __init__ (line 6) | def __init__(self, fillcolor=(128, 128, 128)):
    method __call__ (line 9) | def __call__(self, x, magnitude):
  class ShearY (line 15) | class ShearY(object):
    method __init__ (line 16) | def __init__(self, fillcolor=(128, 128, 128)):
    method __call__ (line 19) | def __call__(self, x, magnitude):
  class TranslateX (line 25) | class TranslateX(object):
    method __init__ (line 26) | def __init__(self, fillcolor=(128, 128, 128)):
    method __call__ (line 29) | def __call__(self, x, magnitude):
  class TranslateY (line 35) | class TranslateY(object):
    method __init__ (line 36) | def __init__(self, fillcolor=(128, 128, 128)):
    method __call__ (line 39) | def __call__(self, x, magnitude):
  class Rotate (line 45) | class Rotate(object):
    method __call__ (line 48) | def __call__(self, x, magnitude):
  class Color (line 53) | class Color(object):
    method __call__ (line 54) | def __call__(self, x, magnitude):
  class Posterize (line 58) | class Posterize(object):
    method __call__ (line 59) | def __call__(self, x, magnitude):
  class Solarize (line 63) | class Solarize(object):
    method __call__ (line 64) | def __call__(self, x, magnitude):
  class Contrast (line 68) | class Contrast(object):
    method __call__ (line 69) | def __call__(self, x, magnitude):
  class Sharpness (line 73) | class Sharpness(object):
    method __call__ (line 74) | def __call__(self, x, magnitude):
  class Brightness (line 78) | class Brightness(object):
    method __call__ (line 79) | def __call__(self, x, magnitude):
  class AutoContrast (line 83) | class AutoContrast(object):
    method __call__ (line 84) | def __call__(self, x, magnitude):
  class Equalize (line 88) | class Equalize(object):
    method __call__ (line 89) | def __call__(self, x, magnitude):
  class Invert (line 93) | class Invert(object):
    method __call__ (line 94) | def __call__(self, x, magnitude):

FILE: transopt/benchmark/HPO/misc.py
  class _SplitDataset (line 17) | class _SplitDataset(torch.utils.data.Dataset):
    method __init__ (line 19) | def __init__(self, underlying_dataset, keys):
    method __getitem__ (line 23) | def __getitem__(self, key):
    method __len__ (line 25) | def __len__(self):
  function split_dataset (line 28) | def split_dataset(dataset, n, seed=0):
  function accuracy (line 42) | def accuracy(network, loader, device):
  function print_row (line 64) | def print_row(row, colwidth=10, latex=False):
  class LossPlotter (line 80) | class LossPlotter:
    method __init__ (line 81) | def __init__(self):
    method update (line 91) | def update(self, classification_loss, reconstruction_loss):
    method show (line 119) | def show(self):

FILE: transopt/benchmark/HPO/networks.py
  function Featurizer (line 21) | def Featurizer(input_shape, architecture, model_size, hparams):
  class Identity (line 37) | class Identity(nn.Module):
    method __init__ (line 39) | def __init__(self):
    method forward (line 42) | def forward(self, x):
  class MLP (line 46) | class MLP(nn.Module):
    method __init__ (line 48) | def __init__(self, n_inputs, n_outputs, hparams):
    method forward (line 58) | def forward(self, x):
  class ResNet (line 69) | class ResNet(torch.nn.Module):
    method __init__ (line 71) | def __init__(self, input_shape, model_size, hparams):
    method forward (line 108) | def forward(self, x):
    method train (line 112) | def train(self, mode=True):
    method freeze_bn (line 119) | def freeze_bn(self):
  function conv3x3 (line 125) | def conv3x3(in_planes, out_planes, stride=1):
  function conv_init (line 135) | def conv_init(m):
  class wide_basic (line 145) | class wide_basic(nn.Module):
    method __init__ (line 146) | def __init__(self, in_planes, planes, dropout_rate, stride=1):
    method forward (line 161) | def forward(self, x):
  class WideResNet (line 169) | class WideResNet(nn.Module):
    method __init__ (line 171) | def __init__(self, input_shape, model_size, hparams):
    method _wide_layer (line 201) | def _wide_layer(self, block, planes, num_blocks, dropout, stride):
    method forward (line 211) | def forward(self, x):
  class DenseNet (line 222) | class DenseNet(nn.Module):
    method __init__ (line 224) | def __init__(self, input_shape, model_size, hparams):
    method forward (line 249) | def forward(self, x):
  class ht_CNN (line 256) | class ht_CNN(nn.Module):
    method __init__ (line 265) | def __init__(self, input_shape):
    method forward (line 279) | def forward(self, x):
  class CNN (line 300) | class CNN(nn.Module):
    method __init__ (line 304) | def __init__(self, input_shape, hparams):
    method forward (line 316) | def forward(self, x):
  class ContextNet (line 330) | class ContextNet(nn.Module):
    method __init__ (line 331) | def __init__(self, input_shape):
    method forward (line 346) | def forward(self, x):
  class AlexNet (line 349) | class AlexNet(nn.Module):
    method __init__ (line 351) | def __init__(self, input_shape, hparams):
    method forward (line 380) | def forward(self, x):
  function Classifier (line 388) | def Classifier(in_features, out_features, dropout=0.5, is_nonlinear=False):

FILE: transopt/benchmark/HPO/visualization.py
  function get_cifar10_data (line 8) | def get_cifar10_data(transform):

FILE: transopt/benchmark/HPO/wide_resnet.py
  function conv3x3 (line 17) | def conv3x3(in_planes, out_planes, stride=1):
  function conv_init (line 27) | def conv_init(m):
  class wide_basic (line 37) | class wide_basic(nn.Module):
    method __init__ (line 38) | def __init__(self, in_planes, planes, dropout_rate, stride=1):
    method forward (line 55) | def forward(self, x):
  class Wide_ResNet (line 63) | class Wide_ResNet(nn.Module):
    method __init__ (line 65) | def __init__(self, input_shape, depth, widen_factor, dropout_rate):
    method _wide_layer (line 87) | def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
    method forward (line 97) | def forward(self, x):

FILE: transopt/benchmark/HPOB/HpobBench.py
  class HPOb (line 12) | class HPOb():
    method __init__ (line 13) | def __init__(self, search_space_id, data_set_id, xdim, path='./Benchma...
    method transfer (line 39) | def transfer(self, X):
    method normalize (line 42) | def normalize(self, X):
    method data_num (line 45) | def data_num(self):
    method get_var (line 48) | def get_var(self, indexs):
    method get_idx (line 52) | def get_idx(self, vars):
    method get_all_unobserved_var (line 62) | def get_all_unobserved_var(self):
    method get_all_unobserved_idxs (line 65) | def get_all_unobserved_idxs(self):
    method f (line 68) | def f(self,X, indexs):
  function calculate_correlation (line 98) | def calculate_correlation(x1, y1, X2, Y2):

FILE: transopt/benchmark/HPOOOD/algorithms.py
  function get_algorithm_class (line 56) | def get_algorithm_class(algorithm_name):
  class Algorithm (line 62) | class Algorithm(torch.nn.Module):
    method __init__ (line 69) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 73) | def update(self, minibatches, unlabeled=None):
    method predict (line 83) | def predict(self, x):
  class ERM (line 86) | class ERM(Algorithm):
    method __init__ (line 91) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 107) | def update(self, minibatches, unlabeled=None):
    method predict (line 118) | def predict(self, x):
  class Fish (line 122) | class Fish(Algorithm):
    method __init__ (line 128) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method create_clone (line 142) | def create_clone(self, device):
    method fish (line 153) | def fish(self, meta_weights, inner_weights, lr_meta):
    method update (line 159) | def update(self, minibatches, unlabeled=None):
    method predict (line 178) | def predict(self, x):
  class ARM (line 182) | class ARM(ERM):
    method __init__ (line 184) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method predict (line 192) | def predict(self, x):
  class AbstractDANN (line 207) | class AbstractDANN(Algorithm):
    method __init__ (line 210) | def __init__(self, input_shape, num_classes, num_domains,
    method update (line 246) | def update(self, minibatches, unlabeled=None):
    method predict (line 294) | def predict(self, x):
  class DANN (line 297) | class DANN(AbstractDANN):
    method __init__ (line 299) | def __init__(self, input_shape, num_classes, num_domains, hparams):
  class CDANN (line 304) | class CDANN(AbstractDANN):
    method __init__ (line 306) | def __init__(self, input_shape, num_classes, num_domains, hparams):
  class IRM (line 311) | class IRM(ERM):
    method __init__ (line 314) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method _irm_penalty (line 320) | def _irm_penalty(logits, y):
    method update (line 330) | def update(self, minibatches, unlabeled=None):
  class RDM (line 366) | class RDM(ERM):
    method __init__ (line 368) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method my_cdist (line 372) | def my_cdist(self, x1, x2):
    method gaussian_kernel (line 381) | def gaussian_kernel(self, x, y, gamma=[0.0001, 0.001, 0.01, 0.1, 1, 10...
    method mmd (line 391) | def mmd(self, x, y):
    method _moment_penalty (line 398) | def _moment_penalty(p_mean, q_mean, p_var, q_var):
    method _kl_penalty (line 402) | def _kl_penalty(p_mean, q_mean, p_var, q_var):
    method _js_penalty (line 405) | def _js_penalty(self, p_mean, q_mean, p_var, q_var):
    method update (line 411) | def update(self, minibatches, unlabeled=None, held_out_minibatches=None):
  class VREx (line 475) | class VREx(ERM):
    method __init__ (line 477) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 482) | def update(self, minibatches, unlabeled=None):
  class Mixup (line 521) | class Mixup(ERM):
    method __init__ (line 527) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 531) | def update(self, minibatches, unlabeled=None):
  class GroupDRO (line 553) | class GroupDRO(ERM):
    method __init__ (line 558) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 563) | def update(self, minibatches, unlabeled=None):
  class MLDG (line 587) | class MLDG(ERM):
    method __init__ (line 594) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 599) | def update(self, minibatches, unlabeled=None):
  class AbstractMMD (line 703) | class AbstractMMD(ERM):
    method __init__ (line 708) | def __init__(self, input_shape, num_classes, num_domains, hparams, gau...
    method my_cdist (line 716) | def my_cdist(self, x1, x2):
    method gaussian_kernel (line 724) | def gaussian_kernel(self, x, y, gamma=[0.001, 0.01, 0.1, 1, 10, 100,
    method mmd (line 734) | def mmd(self, x, y):
    method update (line 753) | def update(self, minibatches, unlabeled=None):
  class MMD (line 781) | class MMD(AbstractMMD):
    method __init__ (line 786) | def __init__(self, input_shape, num_classes, num_domains, hparams):
  class CORAL (line 791) | class CORAL(AbstractMMD):
    method __init__ (line 796) | def __init__(self, input_shape, num_classes, num_domains, hparams):
  class MTL (line 801) | class MTL(Algorithm):
    method __init__ (line 808) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 829) | def update(self, minibatches, unlabeled=None):
    method update_embeddings_ (line 840) | def update_embeddings_(self, features, env=None):
    method predict (line 851) | def predict(self, x, env=None):
  class SagNet (line 856) | class SagNet(Algorithm):
    method __init__ (line 862) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method forward_c (line 914) | def forward_c(self, x):
    method forward_s (line 918) | def forward_s(self, x):
    method randomize (line 922) | def randomize(self, x, what="style", eps=1e-5):
    method update (line 946) | def update(self, minibatches, unlabeled=None):
    method predict (line 974) | def predict(self, x):
  class RSC (line 978) | class RSC(ERM):
    method __init__ (line 979) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 986) | def update(self, minibatches, unlabeled=None):
  class SD (line 1035) | class SD(ERM):
    method __init__ (line 1040) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 1045) | def update(self, minibatches, unlabeled=None):
  class ANDMask (line 1060) | class ANDMask(ERM):
    method __init__ (line 1066) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 1071) | def update(self, minibatches, unlabeled=None):
    method mask_grads (line 1090) | def mask_grads(self, tau, gradients, params):
  class IGA (line 1105) | class IGA(ERM):
    method __init__ (line 1111) | def __init__(self, in_features, num_classes, num_domains, hparams):
    method update (line 1114) | def update(self, minibatches, unlabeled=None):
  class SelfReg (line 1147) | class SelfReg(ERM):
    method __init__ (line 1148) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 1167) | def update(self, minibatches, unlabeled=None):
  class SANDMask (line 1236) | class SANDMask(ERM):
    method __init__ (line 1242) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 1257) | def update(self, minibatches, unlabeled=None):
    method mask_grads (line 1278) | def mask_grads(self, gradients, params):
  class Fishr (line 1301) | class Fishr(Algorithm):
    method __init__ (line 1304) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method _init_optimizer (line 1327) | def _init_optimizer(self):
    method update (line 1334) | def update(self, minibatches, unlabeled=None):
    method compute_fishr_penalty (line 1362) | def compute_fishr_penalty(self, all_logits, all_y, len_minibatches):
    method _get_grads (line 1367) | def _get_grads(self, logits, y):
    method _get_grads_var_per_domain (line 1384) | def _get_grads_var_per_domain(self, dict_grads, len_minibatches):
    method _compute_distance_grads_var (line 1404) | def _compute_distance_grads_var(self, grads_var_per_domain):
    method predict (line 1428) | def predict(self, x):
  class TRM (line 1431) | class TRM(Algorithm):
    method __init__ (line 1437) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method neum (line 1463) | def neum(v, model, batch):
    method update (line 1498) | def update(self, minibatches, unlabeled=None):
    method predict (line 1590) | def predict(self, x):
    method train (line 1593) | def train(self):
    method eval (line 1596) | def eval(self):
  class IB_ERM (line 1599) | class IB_ERM(ERM):
    method __init__ (line 1602) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 1612) | def update(self, minibatches, unlabeled=None):
  class IB_IRM (line 1656) | class IB_IRM(ERM):
    method __init__ (line 1659) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method _irm_penalty (line 1670) | def _irm_penalty(logits, y):
    method update (line 1680) | def update(self, minibatches, unlabeled=None):
  class AbstractCAD (line 1733) | class AbstractCAD(Algorithm):
    method __init__ (line 1738) | def __init__(self, input_shape, num_classes, num_domains,
    method bn_loss (line 1775) | def bn_loss(self, z, y, dom_labels):
    method update (line 1856) | def update(self, minibatches, unlabeled=None):
    method predict (line 1877) | def predict(self, x):
  class CAD (line 1881) | class CAD(AbstractCAD):
    method __init__ (line 1889) | def __init__(self, input_shape, num_classes, num_domains, hparams):
  class CondCAD (line 1893) | class CondCAD(AbstractCAD):
    method __init__ (line 1900) | def __init__(self, input_shape, num_classes, num_domains, hparams):
  class Transfer (line 1904) | class Transfer(Algorithm):
    method __init__ (line 1907) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method loss_gap (line 1935) | def loss_gap(self, minibatches, device):
    method update (line 1947) | def update(self, minibatches, unlabeled=None):
    method update_second (line 1971) | def update_second(self, minibatches, unlabeled=None):
    method predict (line 1998) | def predict(self, x):
  class AbstractCausIRL (line 2002) | class AbstractCausIRL(ERM):
    method __init__ (line 2004) | def __init__(self, input_shape, num_classes, num_domains, hparams, gau...
    method my_cdist (line 2012) | def my_cdist(self, x1, x2):
    method gaussian_kernel (line 2020) | def gaussian_kernel(self, x, y, gamma=[0.001, 0.01, 0.1, 1, 10, 100,
    method mmd (line 2030) | def mmd(self, x, y):
    method update (line 2049) | def update(self, minibatches, unlabeled=None):
  class CausIRL_MMD (line 2086) | class CausIRL_MMD(AbstractCausIRL):
    method __init__ (line 2088) | def __init__(self, input_shape, num_classes, num_domains, hparams):
  class CausIRL_CORAL (line 2093) | class CausIRL_CORAL(AbstractCausIRL):
    method __init__ (line 2095) | def __init__(self, input_shape, num_classes, num_domains, hparams):
  class EQRM (line 2100) | class EQRM(ERM):
    method __init__ (line 2105) | def __init__(self, input_shape, num_classes, num_domains, hparams, dis...
    method risk (line 2114) | def risk(self, x, y):
    method update (line 2117) | def update(self, minibatches, unlabeled=None):
  class ADRMX (line 2145) | class ADRMX(Algorithm):
    method __init__ (line 2147) | def __init__(self, input_shape, num_classes, num_domains, hparams):
    method update (line 2197) | def update(self, minibatches, unlabeled=None):
    method predict (line 2282) | def predict(self, x):

FILE: transopt/benchmark/HPOOOD/collect_results.py
  function find_jsonl_files (line 16) | def find_jsonl_files(directory):
  function find_dirs (line 29) | def find_dirs(directory):
  function remove_empty_directories (line 37) | def remove_empty_directories(directory):
  function plot_bins (line 51) | def plot_bins(test_data, val_data, save_file_name):
  function plot_traj (line 63) | def plot_traj(test_data, val_data, save_file_name):
  function plot_scatter (line 75) | def plot_scatter(x, y, values, save_file_name):
  function print_table (line 88) | def print_table(table, header_text, row_labels, col_labels, colwidth=10,

FILE: transopt/benchmark/HPOOOD/download.py
  function stage_path (line 22) | def stage_path(data_dir, name):
  function download_and_extract (line 31) | def download_and_extract(url, dst, remove=True):
  function download_vlcs (line 99) | def download_vlcs(data_dir):
  function download_mnist (line 109) | def download_mnist(data_dir):
  function download_pacs (line 117) | def download_pacs(data_dir):
  function download_office_home (line 130) | def download_office_home(data_dir):
  function download_domain_net (line 143) | def download_domain_net(data_dir):
  function download_terra_incognita (line 169) | def download_terra_incognita(data_dir):
  function download_sviro (line 268) | def download_sviro(data_dir):
  function download_spawrious (line 281) | def download_spawrious(data_dir, remove=True):

FILE: transopt/benchmark/HPOOOD/fast_data_loader.py
  class _InfiniteSampler (line 5) | class _InfiniteSampler(torch.utils.data.Sampler):
    method __init__ (line 7) | def __init__(self, sampler):
    method __iter__ (line 10) | def __iter__(self):
  class InfiniteDataLoader (line 15) | class InfiniteDataLoader:
    method __init__ (line 16) | def __init__(self, dataset, weights, batch_size, num_workers):
    method __iter__ (line 41) | def __iter__(self):
    method __len__ (line 45) | def __len__(self):
  class FastDataLoader (line 48) | class FastDataLoader:
    method __init__ (line 51) | def __init__(self, dataset, batch_size, num_workers):
    method __iter__ (line 68) | def __iter__(self):
    method __len__ (line 72) | def __len__(self):

FILE: transopt/benchmark/HPOOOD/hparams_registry.py
  function _define_hparam (line 5) | def _define_hparam(hparams, hparam_name, default_val, random_val_fn):
  function _hparams (line 9) | def _hparams(algorithm, dataset, random_seed):
  function default_hparams (line 224) | def default_hparams(algorithm, dataset):
  function random_hparams (line 228) | def random_hparams(algorithm, dataset, seed):
  function get_hparams (line 231) | def get_hparams(algorithm, dataset):

FILE: transopt/benchmark/HPOOOD/hpoood.py
  function make_record (line 38) | def make_record(step, hparams_seed, envs):
  class HPOOOD_base (line 53) | class HPOOOD_base(NonTabularProblem):
    method __init__ (line 118) | def __init__(
    method save_checkpoint (line 259) | def save_checkpoint(self, filename):
    method get_configuration_space (line 271) | def get_configuration_space(
    method get_fidelity_space (line 293) | def get_fidelity_space(
    method train (line 312) | def train(self, configuration: dict):
    method get_score (line 383) | def get_score(self, configuration: dict):
    method objective_function (line 406) | def objective_function(
    method get_objectives (line 435) | def get_objectives(self) -> Dict:
    method get_problem_type (line 438) | def get_problem_type(self):
  class ERMOOD (line 444) | class ERMOOD(HPOOOD_base):
    method __init__ (line 445) | def __init__(
  class IRMOOD (line 451) | class IRMOOD(HPOOOD_base):
    method __init__ (line 452) | def __init__(
  class ARMOOD (line 458) | class ARMOOD(HPOOOD_base):
    method __init__ (line 459) | def __init__(
  class MixupOOD (line 465) | class MixupOOD(HPOOOD_base):
    method __init__ (line 466) | def __init__(
  class DANNOOD (line 472) | class DANNOOD(HPOOOD_base):
    method __init__ (line 473) | def __init__(

FILE: transopt/benchmark/HPOOOD/misc.py
  function distance (line 20) | def distance(h1, h2):
  function proj (line 28) | def proj(delta, adv_h, h):
  function l2_between_dicts (line 41) | def l2_between_dicts(dict_1, dict_2):
  class MovingAverage (line 50) | class MovingAverage:
    method __init__ (line 52) | def __init__(self, ema, oneminusema_correction=True):
    method update (line 58) | def update(self, dict_data):
  function make_weights_for_balanced_classes (line 81) | def make_weights_for_balanced_classes(dataset):
  function pdb (line 101) | def pdb():
  function seed_hash (line 107) | def seed_hash(*args):
  function print_separator (line 114) | def print_separator():
  function print_row (line 117) | def print_row(row, colwidth=10, latex=False):
  class _SplitDataset (line 131) | class _SplitDataset(torch.utils.data.Dataset):
    method __init__ (line 133) | def __init__(self, underlying_dataset, keys):
    method __getitem__ (line 137) | def __getitem__(self, key):
    method __len__ (line 139) | def __len__(self):
  function split_dataset (line 142) | def split_dataset(dataset, n, seed=0):
  function random_pairs_of_minibatches (line 156) | def random_pairs_of_minibatches(minibatches):
  function split_meta_train_test (line 172) | def split_meta_train_test(minibatches, num_meta_test=1):
  function accuracy (line 188) | def accuracy(network, loader, weights, device):
  class Tee (line 214) | class Tee:
    method __init__ (line 215) | def __init__(self, fname, mode="a"):
    method write (line 219) | def write(self, message):
    method flush (line 224) | def flush(self):
  class ParamDict (line 228) | class ParamDict(OrderedDict):
    method __init__ (line 233) | def __init__(self, *args, **kwargs):
    method _prototype (line 236) | def _prototype(self, other, op):
    method __add__ (line 244) | def __add__(self, other):
    method __rmul__ (line 247) | def __rmul__(self, other):
    method __neg__ (line 252) | def __neg__(self):
    method __rsub__ (line 255) | def __rsub__(self, other):
    method __truediv__ (line 261) | def __truediv__(self, other):
  class Kernel (line 270) | class Kernel(torch.nn.Module):
    method __init__ (line 273) | def __init__(self, bw=None):
    method _diffs (line 277) | def _diffs(self, test_Xs, train_Xs):
    method forward (line 283) | def forward(self, test_Xs, train_Xs):
    method sample (line 286) | def sample(self, train_Xs):
  class GaussianKernel (line 290) | class GaussianKernel(Kernel):
    method forward (line 293) | def forward(self, test_Xs, train_Xs):
    method sample (line 307) | def sample(self, train_Xs):
    method cdf (line 312) | def cdf(self, test_Xs, train_Xs):
  function estimate_bandwidth (line 319) | def estimate_bandwidth(x, method="silverman"):
  class KernelDensityEstimator (line 338) | class KernelDensityEstimator(torch.nn.Module):
    method __init__ (line 341) | def __init__(self, train_Xs, kernel='gaussian', bw_select='Gauss-optim...
    method device (line 362) | def device(self):
    method forward (line 366) | def forward(self, x):
    method sample (line 369) | def sample(self, n_samples):
    method cdf (line 373) | def cdf(self, x):
  class Distribution1D (line 384) | class Distribution1D:
    method __init__ (line 385) | def __init__(self, dist_function=None):
    method parameters (line 394) | def parameters(self):
    method create_dist (line 397) | def create_dist(self):
    method estimate_parameters (line 403) | def estimate_parameters(self, x):
    method log_prob (line 406) | def log_prob(self, x):
    method cdf (line 409) | def cdf(self, x):
    method icdf (line 412) | def icdf(self, q):
    method sample (line 415) | def sample(self, n=1):
    method sample_n (line 421) | def sample_n(self, n=10):
  function continuous_bisect_fun_left (line 425) | def continuous_bisect_fun_left(f, v, lo, hi, n_steps=32):
  class Normal (line 437) | class Normal(Distribution1D):
    method __init__ (line 438) | def __init__(self, location=0, scale=1):
    method parameters (line 444) | def parameters(self):
    method estimate_parameters (line 447) | def estimate_parameters(self, x):
    method icdf (line 453) | def icdf(self, q):
  class Nonparametric (line 466) | class Nonparametric(Distribution1D):
    method __init__ (line 467) | def __init__(self, use_kde=True, bw_select='Gauss-optimal'):
    method parameters (line 474) | def parameters(self):
    method estimate_parameters (line 477) | def estimate_parameters(self, x):
    method icdf (line 484) | def icdf(self, q):
  class SupConLossLambda (line 509) | class SupConLossLambda(torch.nn.Module):
    method __init__ (line 510) | def __init__(self, lamda: float=0.5, temperature: float=0.07):
    method forward (line 515) | def forward(self, features: torch.Tensor, labels: torch.Tensor, domain...

FILE: transopt/benchmark/HPOOOD/networks.py
  function remove_batch_norm_from_resnet (line 12) | def remove_batch_norm_from_resnet(model):
  class Identity (line 36) | class Identity(nn.Module):
    method __init__ (line 38) | def __init__(self):
    method forward (line 41) | def forward(self, x):
  class MLP (line 45) | class MLP(nn.Module):
    method __init__ (line 47) | def __init__(self, n_inputs, n_outputs, hparams):
    method forward (line 57) | def forward(self, x):
  class ResNet (line 69) | class ResNet(torch.nn.Module):
    method __init__ (line 71) | def __init__(self, input_shape, hparams):
    method forward (line 102) | def forward(self, x):
    method train (line 106) | def train(self, mode=True):
    method freeze_bn (line 113) | def freeze_bn(self):
  class MNIST_CNN (line 119) | class MNIST_CNN(nn.Module):
    method __init__ (line 128) | def __init__(self, input_shape):
    method forward (line 142) | def forward(self, x):
  class ContextNet (line 164) | class ContextNet(nn.Module):
    method __init__ (line 165) | def __init__(self, input_shape):
    method forward (line 180) | def forward(self, x):
  function Featurizer (line 184) | def Featurizer(input_shape, hparams):
  function Classifier (line 198) | def Classifier(in_features, out_features, is_nonlinear=False):
  class WholeFish (line 210) | class WholeFish(nn.Module):
    method __init__ (line 211) | def __init__(self, input_shape, num_classes, hparams, weights=None):
    method reset_weights (line 224) | def reset_weights(self, weights):
    method forward (line 227) | def forward(self, x):

FILE: transopt/benchmark/HPOOOD/ooddatasets.py
  function get_dataset_class (line 19) | def get_dataset_class(dataset_name):
  function num_environments (line 26) | def num_environments(dataset_name):
  class MultipleDomainDataset (line 30) | class MultipleDomainDataset:
    method __getitem__ (line 37) | def __getitem__(self, index):
    method __len__ (line 40) | def __len__(self):
  class Debug (line 44) | class Debug(MultipleDomainDataset):
    method __init__ (line 45) | def __init__(self, root, test_envs, hparams):
  class Debug28 (line 58) | class Debug28(Debug):
  class Debug224 (line 62) | class Debug224(Debug):
  class MultipleEnvironmentMNIST (line 67) | class MultipleEnvironmentMNIST(MultipleDomainDataset):
    method __init__ (line 68) | def __init__(self, root, environments, dataset_transform, input_shape,
  class ColoredMNIST (line 99) | class ColoredMNIST(MultipleEnvironmentMNIST):
    method __init__ (line 102) | def __init__(self, root, test_envs, hparams):
    method color_dataset (line 109) | def color_dataset(self, images, labels, environment):
    method torch_bernoulli_ (line 132) | def torch_bernoulli_(self, p, size):
    method torch_xor_ (line 135) | def torch_xor_(self, a, b):
  class RotatedMNIST (line 139) | class RotatedMNIST(MultipleEnvironmentMNIST):
    method __init__ (line 142) | def __init__(self, root, test_envs, hparams):
    method rotate_dataset (line 146) | def rotate_dataset(self, images, labels, angle):
  class MultipleEnvironmentImageFolder (line 162) | class MultipleEnvironmentImageFolder(MultipleDomainDataset):
    method __init__ (line 163) | def __init__(self, root, test_envs, augment, hparams):
  class VLCS (line 203) | class VLCS(MultipleEnvironmentImageFolder):
    method __init__ (line 206) | def __init__(self, root, test_envs, hparams):
  class PACS (line 210) | class PACS(MultipleEnvironmentImageFolder):
    method __init__ (line 213) | def __init__(self, root, test_envs, hparams):
  class DomainNet (line 217) | class DomainNet(MultipleEnvironmentImageFolder):
    method __init__ (line 220) | def __init__(self, root, test_envs, hparams):
  class OfficeHome (line 224) | class OfficeHome(MultipleEnvironmentImageFolder):
    method __init__ (line 227) | def __init__(self, root, test_envs, hparams):
  class TerraIncognita (line 231) | class TerraIncognita(MultipleEnvironmentImageFolder):
    method __init__ (line 234) | def __init__(self, root, test_envs, hparams):
  class SVIRO (line 238) | class SVIRO(MultipleEnvironmentImageFolder):
    method __init__ (line 241) | def __init__(self, root, test_envs, hparams):
  class WILDSEnvironment (line 246) | class WILDSEnvironment:
    method __init__ (line 247) | def __init__(
    method __getitem__ (line 264) | def __getitem__(self, i):
    method __len__ (line 274) | def __len__(self):
  class WILDSDataset (line 278) | class WILDSDataset(MultipleDomainDataset):
    method __init__ (line 280) | def __init__(self, dataset, metadata_name, test_envs, augment, hparams):
    method metadata_values (line 318) | def metadata_values(self, wilds_dataset, metadata_name):
  class WILDSCamelyon (line 324) | class WILDSCamelyon(WILDSDataset):
    method __init__ (line 327) | def __init__(self, root, test_envs, hparams):
  class WILDSFMoW (line 333) | class WILDSFMoW(WILDSDataset):
    method __init__ (line 336) | def __init__(self, root, test_envs, hparams):
  class CustomImageFolder (line 343) | class CustomImageFolder(Dataset):
    method __init__ (line 347) | def __init__(self, folder_path, class_index, limit=None, transform=None):
    method __len__ (line 355) | def __len__(self):
    method __getitem__ (line 358) | def __getitem__(self, index):
  class SpawriousBenchmark (line 368) | class SpawriousBenchmark(MultipleDomainDataset):
    method __init__ (line 374) | def __init__(self, train_combinations, test_combinations, root_dir, au...
    method _prepare_data_lists (line 380) | def _prepare_data_lists(self, train_combinations, test_combinations, r...
    method _create_data_list (line 405) | def _create_data_list(self, combinations, root_dir, transforms):
    method build_type1_combination (line 444) | def build_type1_combination(self,group,test,filler):
    method build_type2_combination (line 467) | def build_type2_combination(self,group,test):
  class SpawriousO2O_easy (line 487) | class SpawriousO2O_easy(SpawriousBenchmark):
    method __init__ (line 488) | def __init__(self, root_dir, test_envs, hparams):
  class SpawriousO2O_medium (line 495) | class SpawriousO2O_medium(SpawriousBenchmark):
    method __init__ (line 496) | def __init__(self, root_dir, test_envs, hparams):
  class SpawriousO2O_hard (line 503) | class SpawriousO2O_hard(SpawriousBenchmark):
    method __init__ (line 504) | def __init__(self, root_dir, test_envs, hparams):
  class SpawriousM2M_easy (line 511) | class SpawriousM2M_easy(SpawriousBenchmark):
    method __init__ (line 512) | def __init__(self, root_dir, test_envs, hparams):
  class SpawriousM2M_medium (line 518) | class SpawriousM2M_medium(SpawriousBenchmark):
    method __init__ (line 519) | def __init__(self, root_dir, test_envs, hparams):
  class SpawriousM2M_hard (line 525) | class SpawriousM2M_hard(SpawriousBenchmark):
    method __init__ (line 527) | def __init__(self, root_dir, test_envs, hparams):

FILE: transopt/benchmark/HPOOOD/wide_resnet.py
  function conv3x3 (line 17) | def conv3x3(in_planes, out_planes, stride=1):
  function conv_init (line 27) | def conv_init(m):
  class wide_basic (line 37) | class wide_basic(nn.Module):
    method __init__ (line 38) | def __init__(self, in_planes, planes, dropout_rate, stride=1):
    method forward (line 55) | def forward(self, x):
  class Wide_ResNet (line 63) | class Wide_ResNet(nn.Module):
    method __init__ (line 65) | def __init__(self, input_shape, depth, widen_factor, dropout_rate):
    method _wide_layer (line 87) | def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
    method forward (line 97) | def forward(self, x):

FILE: transopt/benchmark/RL/LunarlanderBenchmark.py
  function lunar_lander_simulation (line 20) | def lunar_lander_simulation(w, print_reward=False, seed=1, dimension=12):
  function heuristic_controller (line 47) | def heuristic_controller(s, w, is_continuous=True):
  function heuristic_controller5d (line 74) | def heuristic_controller5d(s, w, is_continuous=True):
  function heuristic_controller10d (line 166) | def heuristic_controller10d(s, w, is_continuous=True):
  function vanilla_heuristic (line 193) | def vanilla_heuristic(s, is_continuous=False):
  class LunarlanderBenchmark (line 227) | class LunarlanderBenchmark(NonTabularProblem):
    method __init__ (line 236) | def __init__(self, task_name, task_id, budget, seed, task_type="non-ta...
    method objective_function (line 242) | def objective_function(
    method get_configuration_space (line 254) | def get_configuration_space(
    method get_fidelity_space (line 281) | def get_fidelity_space(
    method get_meta_information (line 304) | def get_meta_information(self) -> Dict:

FILE: transopt/benchmark/instantiate_problems.py
  function InstantiateProblems (line 6) | def InstantiateProblems(

FILE: transopt/benchmark/problem_base/base.py
  class ProblemBase (line 16) | class ProblemBase(abc.ABC):
    method __init__ (line 17) | def __init__(self, seed: Union[int, np.random.RandomState, None] = Non...
    method f (line 45) | def f(self, configuration, fidelity=None, seed=None, **kwargs) -> Dict:
    method objective_function (line 53) | def objective_function(
    method get_configuration_space (line 66) | def get_configuration_space(self) -> SearchSpace:
    method check_validity (line 80) | def check_validity(self, configuration, fidelity):
    method __call__ (line 111) | def __call__(self, configuration: Dict, **kwargs) -> float:
    method get_fidelity_space (line 120) | def get_fidelity_space(self) -> FidelitySpace:
    method get_objectives (line 134) | def get_objectives(self) -> dict:
    method problem_type (line 150) | def problem_type(self):
    method num_objectives (line 154) | def num_objectives(self):
    method num_variables (line 158) | def num_variables(self):

FILE: transopt/benchmark/problem_base/non_tab_problem.py
  class NonTabularProblem (line 18) | class NonTabularProblem(ProblemBase):
    method __init__ (line 19) | def __init__(
    method get_budget_type (line 36) | def get_budget_type(self) -> str:
    method get_budget (line 47) | def get_budget(self) -> int:
    method get_name (line 58) | def get_name(self) -> str:
    method get_type (line 69) | def get_type(self) -> str:
    method get_input_dim (line 80) | def get_input_dim(self) -> int:
    method get_objective_num (line 91) | def get_objective_num(self) -> int:
    method lock (line 94) | def lock(self):
    method unlock (line 97) | def unlock(self):
    method get_lock_state (line 100) | def get_lock_state(self) -> bool:
    method workloads (line 105) | def workloads(self):
    method fidelity (line 110) | def fidelity(self):

FILE: transopt/benchmark/problem_base/tab_problem.py
  class TabularProblem (line 18) | class TabularProblem(ProblemBase):
    method __init__ (line 19) | def __init__(
    method f (line 135) | def f(
    method objective_function (line 148) | def objective_function(
    method sample_dataframe (line 177) | def sample_dataframe(key, df, p_remove=0.):
    method get_configuration_type (line 199) | def get_configuration_type(self):
    method get_configuration_space (line 205) | def get_configuration_space(
    method get_fidelity_space (line 240) | def get_fidelity_space(
    method get_meta_information (line 261) | def get_meta_information(self) -> Dict:
    method get_budget (line 264) | def get_budget(self) -> int:
    method get_name (line 275) | def get_name(self) -> str:
    method get_type (line 286) | def get_type(self) -> str:
    method get_input_dim (line 297) | def get_input_dim(self) -> int:
    method get_objective_num (line 308) | def get_objective_num(self) -> int:
    method lock (line 311) | def lock(self):
    method unlock (line 314) | def unlock(self):
    method get_lock_state (line 317) | def get_lock_state(self) -> bool:
    method get_dataset_size (line 321) | def get_dataset_size(self):
    method get_var_by_idx (line 324) | def get_var_by_idx(self, idx):
    method get_idx_by_var (line 327) | def get_idx_by_var(self, vectors):
    method get_unobserved_vars (line 330) | def get_unobserved_vars(self):
    method get_unobserved_idxs (line 333) | def get_unobserved_idxs(self):

FILE: transopt/benchmark/problem_base/transfer_problem.py
  class TransferProblem (line 13) | class TransferProblem:
    method __init__ (line 14) | def __init__(self, seed: Union[int, np.random.RandomState, None] = Non...
    method add_task_to_id (line 21) | def add_task_to_id(
    method add_task (line 35) | def add_task(
    method del_task_by_id (line 46) | def del_task_by_id(self, del_id, name):
    method get_cur_id (line 49) | def get_cur_id(self):
    method get_tasks_num (line 52) | def get_tasks_num(self):
    method get_unsolved_num (line 55) | def get_unsolved_num(self):
    method get_rest_budget (line 58) | def get_rest_budget(self):
    method get_query_num (line 61) | def get_query_num(self):
    method get_cur_budgettype (line 64) | def get_cur_budgettype(self):
    method get_cur_budget (line 67) | def get_cur_budget(self):
    method get_curname (line 70) | def get_curname(self):
    method get_curdim (line 73) | def get_curdim(self):
    method get_curobj_info (line 76) | def get_curobj_info(self):
    method get_cur_fidelity_info (line 79) | def get_cur_fidelity_info(self) -> Dict:
    method get_cur_searchspace_info (line 82) | def get_cur_searchspace_info(self) -> Dict:
    method get_cur_searchspace (line 86) | def get_cur_searchspace(self) -> SearchSpace:
    method get_curtask (line 90) | def get_curtask(self):
    method get_cur_seed (line 94) | def get_cur_seed(self):
    method get_cur_task_id (line 97) | def get_cur_task_id(self):
    method get_cur_workload (line 100) | def get_cur_workload(self):
    method sync_query_num (line 104) | def sync_query_num(self, query_num: int):
    method roll (line 107) | def roll(self):
    method lock (line 110) | def lock(self):
    method unlock (line 113) | def unlock(self):
    method get_lockstate (line 116) | def get_lockstate(self):
    method get_task_type (line 119) | def get_task_type(self):
    method get_dataset_size (line 130) | def get_dataset_size(self):
    method get_var_by_idx (line 134) | def get_var_by_idx(self, idx):
    method get_idx_by_var (line 138) | def get_idx_by_var(self, vectors):
    method get_unobserved_vars (line 142) | def get_unobserved_vars(self):
    method get_unobserved_idxs (line 146) | def get_unobserved_idxs(self):
    method add_query_num (line 150) | def add_query_num(self):
    method f (line 154) | def f(
  class RemoteTransferOptBenchmark (line 211) | class RemoteTransferOptBenchmark(TransferProblem):
    method __init__ (line 212) | def __init__(
    method add_task_to_id (line 219) | def add_task_to_id(
    method f (line 231) | def f(
    method _package_data (line 261) | def _package_data(
    method _execute_experiment (line 278) | def _execute_experiment(self, data):

FILE: transopt/benchmark/synthetic/MovingPeakBenchmark.py
  class MovingPeakGenerator (line 15) | class MovingPeakGenerator:
    method __init__ (line 16) | def __init__(
    method get_MPB (line 99) | def get_MPB(self):
    method cal_width_shift (line 102) | def cal_width_shift(self):
    method cal_height_shift (line 106) | def cal_height_shift(self):
    method cal_peak_shift (line 110) | def cal_peak_shift(self, previous_shift):
    method change (line 116) | def change(self):
    method current_optimal (line 120) | def current_optimal(self, peak_shape=None):
    method transfer (line 127) | def transfer(self, X):
    method normalize (line 132) | def normalize(self, X):
    method optimizers (line 141) | def optimizers(self):
    method _fix_bound (line 149) | def _fix_bound(data, bound):
  class MovingPeakBenchmark (line 160) | class MovingPeakBenchmark(NonTabularProblem):
    method __init__ (line 161) | def __init__(
    method peak_function_cone (line 181) | def peak_function_cone(self, x):
    method peak_function_sharp (line 185) | def peak_function_sharp(self, x):
    method peak_function_hilly (line 189) | def peak_function_hilly(self, x):
    method objective_function (line 197) | def objective_function(
    method get_configuration_space (line 223) | def get_configuration_space(
    method get_fidelity_space (line 250) | def get_fidelity_space(
    method get_meta_information (line 271) | def get_meta_information(self) -> Dict:

FILE: transopt/benchmark/synthetic/MultiObjBenchmark.py
  class AckleySphereOptBenchmark (line 16) | class AckleySphereOptBenchmark(NonTabularProblem):
    method __init__ (line 17) | def __init__(
    method objective_function (line 51) | def objective_function(
    method get_configuration_space (line 78) | def get_configuration_space(
    method get_fidelity_space (line 105) | def get_fidelity_space(
    method get_meta_information (line 126) | def get_meta_information(self) -> Dict:

FILE: transopt/benchmark/synthetic/synthetic_problems.py
  class SyntheticProblemBase (line 19) | class SyntheticProblemBase(NonTabularProblem):
    method __init__ (line 25) | def __init__(
    method get_fidelity_space (line 36) | def get_fidelity_space(self) -> FidelitySpace:
    method get_objectives (line 40) | def get_objectives(self) -> Dict:
    method get_problem_type (line 43) | def get_problem_type(self):
  class SphereOptBenchmark (line 49) | class SphereOptBenchmark(SyntheticProblemBase):
    method __init__ (line 50) | def __init__(
    method objective_function (line 79) | def objective_function(
    method get_configuration_space (line 101) | def get_configuration_space(self) -> SearchSpace:
  class RastriginOptBenchmark (line 108) | class RastriginOptBenchmark(SyntheticProblemBase):
    method __init__ (line 109) | def __init__(
    method objective_function (line 138) | def objective_function(
    method get_configuration_space (line 161) | def get_configuration_space(self) -> SearchSpace:
  class SchwefelOptBenchmark (line 168) | class SchwefelOptBenchmark(SyntheticProblemBase):
    method __init__ (line 169) | def __init__(
    method objective_function (line 198) | def objective_function(
    method get_configuration_space (line 222) | def get_configuration_space(self) -> SearchSpace:
  class LevyROptBenchmark (line 230) | class LevyROptBenchmark(SyntheticProblemBase):
    method __init__ (line 231) | def __init__(
    method objective_function (line 260) | def objective_function(
    method get_configuration_space (line 291) | def get_configuration_space(self) -> SearchSpace:
  class GriewankOptBenchmark (line 298) | class GriewankOptBenchmark(SyntheticProblemBase):
    method __init__ (line 299) | def __init__(
    method objective_function (line 328) | def objective_function(
    method get_configuration_space (line 353) | def get_configuration_space(self) -> SearchSpace:
  class RosenbrockOptBenchmark (line 360) | class RosenbrockOptBenchmark(SyntheticProblemBase):
    method __init__ (line 361) | def __init__(
    method objective_function (line 390) | def objective_function(
    method get_configuration_space (line 415) | def get_configuration_space(self) -> SearchSpace:
  class DropwaveROptBenchmark (line 422) | class DropwaveROptBenchmark(SyntheticProblemBase):
    method __init__ (line 423) | def __init__(
    method objective_function (line 456) | def objective_function(
    method get_configuration_space (line 479) | def get_configuration_space(self) -> SearchSpace:
  class LangermannOptBenchmark (line 486) | class LangermannOptBenchmark(SyntheticProblemBase):
    method __init__ (line 487) | def __init__(
    method objective_function (line 520) | def objective_function(
    method get_configuration_space (line 546) | def get_configuration_space(self) -> SearchSpace:
  class RotatedHyperEllipsoidOptBenchmark (line 553) | class RotatedHyperEllipsoidOptBenchmark(SyntheticProblemBase):
    method __init__ (line 554) | def __init__(
    method objective_function (line 583) | def objective_function(
    method get_configuration_space (line 606) | def get_configuration_space(self) -> SearchSpace:
  class SumOfDifferentPowersOptBenchmark (line 613) | class SumOfDifferentPowersOptBenchmark(SyntheticProblemBase):
    method __init__ (line 614) | def __init__(
    method objective_function (line 643) | def objective_function(
    method get_configuration_space (line 667) | def get_configuration_space(self) -> SearchSpace:
  class StyblinskiTangOptBenchmark (line 674) | class StyblinskiTangOptBenchmark(SyntheticProblemBase):
    method __init__ (line 675) | def __init__(
    method objective_function (line 704) | def objective_function(
    method get_configuration_space (line 726) | def get_configuration_space(self) -> SearchSpace:
  class PowellOptBenchmark (line 733) | class PowellOptBenchmark(SyntheticProblemBase):
    method __init__ (line 734) | def __init__(
    method objective_function (line 763) | def objective_function(
    method get_configuration_space (line 792) | def get_configuration_space(self) -> SearchSpace:
  class DixonPriceOptBenchmark (line 799) | class DixonPriceOptBenchmark(SyntheticProblemBase):
    method __init__ (line 800) | def __init__(
    method objective_function (line 834) | def objective_function(
    method get_configuration_space (line 860) | def get_configuration_space(self) -> SearchSpace:
  class cpOptBenchmark (line 867) | class cpOptBenchmark(SyntheticProblemBase):
    method __init__ (line 868) | def __init__(
    method objective_function (line 902) | def objective_function(
    method get_configuration_space (line 928) | def get_configuration_space(self) -> SearchSpace:
  class mpbOptBenchmark (line 935) | class mpbOptBenchmark(SyntheticProblemBase):
    method __init__ (line 936) | def __init__(
    method objective_function (line 970) | def objective_function(
    method get_configuration_space (line 995) | def get_configuration_space(self) -> SearchSpace:
  class Ackley (line 1005) | class Ackley(SyntheticProblemBase):
    method __init__ (line 1006) | def __init__(
    method objective_function (line 1039) | def objective_function(
    method get_configuration_space (line 1063) | def get_configuration_space(self) -> SearchSpace:
  class EllipsoidOptBenchmark (line 1071) | class EllipsoidOptBenchmark(SyntheticProblemBase):
    method __init__ (line 1072) | def __init__(
    method objective_function (line 1103) | def objective_function(
    method get_configuration_space (line 1129) | def get_configuration_space(self) -> SearchSpace:
  class DiscusOptBenchmark (line 1136) | class DiscusOptBenchmark(SyntheticProblemBase):
    method __init__ (line 1137) | def __init__(
    method objective_function (line 1168) | def objective_function(
    method get_configuration_space (line 1194) | def get_configuration_space(self) -> SearchSpace:
  class BentCigarOptBenchmark (line 1201) | class BentCigarOptBenchmark(SyntheticProblemBase):
    method __init__ (line 1202) | def __init__(
    method objective_function (line 1233) | def objective_function(
    method get_configuration_space (line 1259) | def get_configuration_space(self) -> SearchSpace:
  class SharpRidgeOptBenchmark (line 1266) | class SharpRidgeOptBenchmark(SyntheticProblemBase):
    method __init__ (line 1267) | def __init__(
    method objective_function (line 1298) | def objective_function(
    method get_configuration_space (line 1329) | def get_configuration_space(self) -> SearchSpace:
  class GriewankRosenbrockOptBenchmark (line 1336) | class GriewankRosenbrockOptBenchmark(SyntheticProblemBase):
    method __init__ (line 1337) | def __init__(
    method objective_function (line 1366) | def objective_function(
    method get_configuration_space (line 1396) | def get_configuration_space(self) -> SearchSpace:
  class KatsuuraOptBenchmark (line 1403) | class KatsuuraOptBenchmark(SyntheticProblemBase):
    method __init__ (line 1404) | def __init__(
    method objective_function (line 1433) | def objective_function(
    method get_configuration_space (line 1465) | def get_configuration_space(self) -> SearchSpace:
  function visualize_function (line 1471) | def visualize_function(func_name, n_points=100):

FILE: transopt/datamanager/database.py
  class DatabaseDaemon (line 46) | class DatabaseDaemon:
    method __init__ (line 47) | def __init__(self, data_path, task_queue, result_queue, stop_event):
    method run (line 53) | def run(self):
  class Database (line 74) | class Database:
    method __init__ (line 75) | def __init__(self, db_file_name="database.db"):
    method close (line 99) | def close(self):
    method _execute (line 104) | def _execute(self, task, args=(), timeout=None, commit=True):
    method query_exec (line 116) | def query_exec(cursor, query, params, fetchone, fetchall, many):
    method execute (line 127) | def execute(
    method executemany (line 144) | def executemany(
    method start_transaction (line 161) | def start_transaction(self):
    method commit_transaction (line 164) | def commit_transaction(self):
    method rollback_transaction (line 167) | def rollback_transaction(self):
    method get_experiment_datasets (line 174) | def get_experiment_datasets(self):
    method get_all_datasets (line 185) | def get_all_datasets(self):
    method get_table_list (line 194) | def get_table_list(self):
    method check_table_exist (line 203) | def check_table_exist(self, name):
    method create_table (line 212) | def create_table(self, name, dataset_cfg, overwrite=False, is_experime...
    method remove_table (line 297) | def remove_table(self, name):
    method create_or_update_config (line 315) | def create_or_update_config(self, name, dataset_cfg, is_experiment=Tru...
    method query_config (line 338) | def query_config(self, name):
    method query_dataset_info (line 348) | def query_dataset_info(self, name):
    method create_or_update_metadata (line 372) | def create_or_update_metadata(self, table_name, metadata, commit=True):
    method get_all_metadata (line 413) | def get_all_metadata(self):
    method search_tables_by_metadata (line 420) | def search_tables_by_metadata(self, search_params):
    method insert_data (line 449) | def insert_data(
    method _get_conditions (line 513) | def _get_conditions(self, rowid=None, conditions=None):
    method update_data (line 557) | def update_data(self, table, data, rowid=None, conditions=None):
    method delete_data (line 598) | def delete_data(self, table, rowid=None, conditions=None):
    method select_data (line 617) | def select_data(
    method get_num_row (line 669) | def get_num_row(self, table):
    method get_column_names (line 673) | def get_column_names(self, table):

FILE: transopt/datamanager/lsh.py
  class LSHCache (line 6) | class LSHCache:
    method __init__ (line 7) | def __init__(self, hasher, num_bands=10):
    method add (line 31) | def add(self, key, vector):
    method query (line 67) | def query(self, vector):

FILE: transopt/datamanager/manager.py
  class DataManager (line 11) | class DataManager:
    method __new__ (line 15) | def __new__(cls, *args, **kwargs):
    method __init__ (line 21) | def __init__(
    method _initialize_lsh_cache (line 33) | def _initialize_lsh_cache(self, num_hashes, char_ngram, num_bands, ran...
    method _add_lsh_vector (line 45) | def _add_lsh_vector(self, dataset_name, dataset_info):
    method _construct_vector (line 49) | def _construct_vector(self, dataset_info):
    method search_similar_datasets (line 68) | def search_similar_datasets(self, problem_config):
    method search_datasets_by_name (line 73) | def search_datasets_by_name(self, dataset_name):
    method get_dataset_info (line 80) | def get_dataset_info(self, dataset_name):
    method get_experiment_datasets (line 83) | def get_experiment_datasets(self):
    method get_all_datasets (line 86) | def get_all_datasets(self):
    method create_dataset (line 89) | def create_dataset(self, dataset_name, dataset_info, overwrite=True):
    method insert_data (line 95) | def insert_data(self, dataset_name, data):
    method remove_dataset (line 98) | def remove_dataset(self, dataset_name):
    method teardown (line 101) | def teardown(self):
  function main (line 107) | def main():

FILE: transopt/datamanager/minhash.py
  class MinHasher (line 7) | class MinHasher:
    method __init__ (line 8) | def __init__(self, num_hashes, char_ngram, random_state=None):
    method num_seeds (line 30) | def num_seeds(self):
    method get_shingles (line 33) | def get_shingles(self, text):
    method fingerprint (line 40) | def fingerprint(self, text):
    method estimate_similarity (line 53) | def estimate_similarity(self, fp1, fp2):
  function jaccard_similarity (line 57) | def jaccard_similarity(set1, set2):

FILE: transopt/optimizer/MultiObjOptimizer/CauMOpt.py
  function calculate_gini_index (line 14) | def calculate_gini_index(labels):
  function features_by_gini (line 21) | def features_by_gini(data, labels):
  class CauMO (line 47) | class CauMO(BOBase):
    method __init__ (line 48) | def __init__(self, config: Dict, rate_oversampling = 4, seed = 0, **kw...
    method initial_sample (line 66) | def initial_sample(self):
    method random_sample (line 69) | def random_sample(self, num_samples: int) -> List[Dict]:
    method update_model (line 95) | def update_model(self, Data):
    method create_model (line 121) | def create_model(self, X, Y):
    method set_data (line 185) | def set_data(self, X, Y):
    method fit_data (line 203) | def fit_data(self, X, Y):
    method suggest (line 221) | def suggest(self, n_suggestions: Union[None, int] = None) -> List[Dict]:
    method observe (line 244) | def observe(self, input_vectors: Union[List[Dict], Dict], output_value...
    method predict (line 252) | def predict(self, X, full_cov=False):
    method raw_predict (line 287) | def raw_predict(self, X, model):
    method raw_predict_var (line 294) | def raw_predict_var(self, X, trees,  predictions, min_variance=0.1):
    method model_reset (line 313) | def model_reset(self):
    method get_fmin (line 317) | def get_fmin(self):
    method get_fmin_by_id (line 323) | def get_fmin_by_id(self, idx):

FILE: transopt/optimizer/MultiObjOptimizer/MoeadEGO.py
  class MoeadEGO (line 17) | class MoeadEGO(BOBase):
    method __init__ (line 18) | def __init__(self, config: Dict, **kwargs):
    method initial_sample (line 35) | def initial_sample(self):
    method random_sample (line 38) | def random_sample(self, num_samples: int) -> List[Dict]:
    method update_model (line 65) | def update_model(self, Data):
    method create_model (line 93) | def create_model(self, X, Y):
    method suggest (line 111) | def suggest(self, n_suggestions: Union[None, int] = None) -> List[Dict]:
    method predict (line 134) | def predict(self, X, full_cov=False):
    method predict_by_id (line 150) | def predict_by_id(self, X, idx, full_cov=False):
    method model_reset (line 164) | def model_reset(self):
    method get_fmin (line 168) | def get_fmin(self):
    method get_fmin_by_id (line 174) | def get_fmin_by_id(self, idx):

FILE: transopt/optimizer/MultiObjOptimizer/ParEGO.py
  class ParEGO (line 12) | class ParEGO(BOBase):
    method __init__ (line 13) | def __init__(self, config: Dict, **kwargs):
    method scalarization (line 31) | def scalarization(self, Y: np.ndarray, rho):
    method initial_sample (line 45) | def initial_sample(self):
    method suggest (line 48) | def suggest(self, n_suggestions: Union[None, int] = None) -> List[Dict]:
    method update_model (line 73) | def update_model(self, Data):
    method create_model (line 99) | def create_model(self, X, Y):
    method predict (line 113) | def predict(self, X, full_cov=False):
    method random_sample (line 129) | def random_sample(self, num_samples: int) -> List[Dict]:
    method model_reset (line 155) | def model_reset(self):
    method get_fmin (line 159) | def get_fmin(self):

FILE: transopt/optimizer/MultiObjOptimizer/SMSEGO.py
  class SMSEGO (line 15) | class SMSEGO(BOBase):
    method __init__ (line 16) | def __init__(self, config:Dict, **kwargs):
    method initial_sample (line 33) | def initial_sample(self):
    method suggest (line 36) | def suggest(self, n_suggestions:Union[None, int] = None) ->List[Dict]:
    method update_model (line 56) | def update_model(self, Data):
    method create_model (line 81) | def create_model(self, X, Y):
    method predict (line 97) | def predict(self, X, full_cov=False):
    method random_sample (line 116) | def random_sample(self, num_samples: int) -> List[Dict]:
    method model_reset (line 140) | def model_reset(self):
    method get_fmin (line 144) | def get_fmin(self):

FILE: transopt/optimizer/SingleObjOptimizer/KrigingOptimizer.py
  class KrigingEA (line 18) | class KrigingEA(BOBase):
    method __init__ (line 19) | def __init__(self, config: Dict, **kwargs):
    method initial_sample (line 57) | def initial_sample(self):
    method suggest (line 60) | def suggest(self, n_suggestions: Union[None, int] = None) -> List[Dict]:
    method observe (line 82) | def observe(self, input_vectors: Union[List[Dict], Dict], output_value...
    method update_model (line 107) | def update_model(self, Data):
    method create_model (line 129) | def create_model(self, X, Y):
    method create_ea (line 134) | def create_ea(self):
    method predict (line 145) | def predict(self, X):
    method sample (line 152) | def sample(self, num_samples: int) -> List[Dict]:
    method model_reset (line 178) | def model_reset(self):
    method get_fmin (line 181) | def get_fmin(self):
    method reset (line 185) | def reset(self, task_name:str, design_space:Dict, search_sapce:Union[N...
    method model_manage_strategy (line 193) | def model_manage_strategy(self):
  class EAProblem (line 225) | class EAProblem(Problem):
    method __init__ (line 226) | def __init__(self, space, predict):
    method _evaluate (line 239) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/SingleObjOptimizer/LFL.py
  class LFLOptimizer (line 23) | class LFLOptimizer(BOBase):
    method __init__ (line 24) | def __init__(self, config:Dict, **kwargs):
    method reset (line 51) | def reset(self, design_space:Dict, search_sapce:Union[None, Dict] = No...
    method initial_sample (line 60) | def initial_sample(self):
    method random_sample (line 71) | def random_sample(self, num_samples: int) -> List[Dict]:
    method combine_data (line 97) | def combine_data(self):
    method suggest (line 103) | def suggest(self, n_suggestions:Union[None, int] = None)->List[Dict]:
    method create_model (line 127) | def create_model(self, X_list, Y_list, mf=None, prior:list=[]):
    method update_model (line 166) | def update_model(self, Data):
    method predict (line 218) | def predict(self, X):
    method var_predict (line 243) | def var_predict(self, X):
    method obj_posterior_samples (line 261) | def obj_posterior_samples(self, X, sample_size):
    method get_fmin (line 283) | def get_fmin(self):
    method set_XY (line 289) | def set_XY(self, X=None, Y=None):
    method samples (line 307) | def samples(self, gp):
    method posterior_samples_f (line 320) | def posterior_samples_f(self,X, model_id, size=10):
    method posterior_samples (line 339) | def posterior_samples(self, X, model_id, size=10):
    method get_model_para (line 361) | def get_model_para(self):
    method update_prior (line 372) | def update_prior(self, parameters):

FILE: transopt/optimizer/SingleObjOptimizer/MetaLearningOptimizer.py
  function get_model (line 28) | def get_model(
  class MetaBOOptimizer (line 42) | class MetaBOOptimizer(OptimizerBase):
    method __init__ (line 45) | def __init__(self, Xdim, bounds, kernel='RBF', likelihood=None, model_...
    method create_model (line 71) | def create_model(self, model_name, Meta_data, Target_data):
    method updateModel (line 112) | def updateModel(self, Target_data):
    method resetModel (line 131) | def resetModel(self, Source_data, Target_data):
    method predict (line 137) | def predict(self, X):
    method get_fmin (line 183) | def get_fmin(self):
    method set_XY (line 196) | def set_XY(self, X=None, Y=None):
    method samples (line 214) | def samples(self, gp):
    method posterior_samples_f (line 227) | def posterior_samples_f(self,X, model_id, size=10):
    method posterior_samples (line 246) | def posterior_samples(self, X, model_id, size=10):

FILE: transopt/optimizer/SingleObjOptimizer/MultitaskOptimizer.py
  class MultitaskBO (line 17) | class MultitaskBO(BOBase):
    method __init__ (line 18) | def __init__(self, config:Dict, **kwargs):
    method initial_sample (line 39) | def initial_sample(self):
    method random_sample (line 42) | def random_sample(self, num_samples: int) -> List[Dict]:
    method suggest (line 66) | def suggest(self, n_suggestions:Union[None, int] = None) ->List[Dict]:
    method update_model (line 90) | def update_model(self, Data):
    method create_model (line 125) | def create_model(self, X_list, Y_list, mf=None, prior:list=[]):
    method set_XY (line 151) | def set_XY(self, X=None, Y=None):
    method model_reset (line 169) | def model_reset(self):
    method predict (line 172) | def predict(self, X):
    method get_fmin (line 196) | def get_fmin(self):

FILE: transopt/optimizer/SingleObjOptimizer/PROptimizer.py
  class PREA (line 19) | class PREA(BayesianOptimizerBase):
    method __init__ (line 20) | def __init__(self, config: Dict, **kwargs):
    method initial_sample (line 63) | def initial_sample(self):
    method suggest (line 66) | def suggest(self, n_suggestions: Union[None, int] = None) -> List[Dict]:
    method observe (line 88) | def observe(self, input_vectors: Union[List[Dict], Dict], output_value...
    method update_model (line 115) | def update_model(self, Data):
    method create_model (line 132) | def create_model(self, X, Y):
    method create_ea (line 138) | def create_ea(self):
    method predict (line 149) | def predict(self, X):
    method sample (line 157) | def sample(self, num_samples: int) -> List[Dict]:
    method model_reset (line 183) | def model_reset(self):
    method get_fmin (line 186) | def get_fmin(self):
    method reset (line 190) | def reset(self, task_name: str, design_space: Dict, search_sapce: Unio...
    method model_manage_strategy (line 198) | def model_manage_strategy(self):
  class EAProblem (line 230) | class EAProblem(Problem):
    method __init__ (line 231) | def __init__(self, space, predict):
    method _evaluate (line 244) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/SingleObjOptimizer/RBFNOptimizer.py
  class RbfnEA (line 19) | class RbfnEA(BayesianOptimizerBase):
    method __init__ (line 20) | def __init__(self, config: Dict, **kwargs):
    method initial_sample (line 78) | def initial_sample(self):
    method suggest (line 81) | def suggest(self, n_suggestions: Union[None, int] = None) -> List[Dict]:
    method observe (line 103) | def observe(self, input_vectors: Union[List[Dict], Dict], output_value...
    method update_model (line 127) | def update_model(self, Data):
    method create_model (line 150) | def create_model(self, X, Y):
    method create_ea (line 158) | def create_ea(self):
    method predict (line 169) | def predict(self, X):
    method sample (line 176) | def sample(self, num_samples: int) -> List[Dict]:
    method model_reset (line 202) | def model_reset(self):
    method get_fmin (line 205) | def get_fmin(self):
    method reset (line 210) | def reset(self, task_name: str, design_space: Dict, search_sapce: Unio...
    method model_manage_strategy (line 218) | def model_manage_strategy(self):
  class EAProblem (line 249) | class EAProblem(Problem):
    method __init__ (line 250) | def __init__(self, space, predict):
    method _evaluate (line 263) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/SingleObjOptimizer/RGPEOptimizer.py
  class RGPEOptimizer (line 12) | class RGPEOptimizer(BOBase):
    method __init__ (line 13) | def __init__(self, config: Dict, **kwargs):
    method initial_sample (line 33) | def initial_sample(self):
    method random_sample (line 36) | def random_sample(self, num_samples: int) -> List[Dict]:
    method model_reset (line 60) | def model_reset(self):
    method meta_update (line 69) | def meta_update(self):
    method suggest (line 71) | def suggest(self, n_suggestions:Union[None, int] = None) ->List[Dict]:
    method create_model (line 91) | def create_model(self):
    method create_model (line 94) | def create_model(self, model_name, Source_data, Target_data):
    method updateModel (line 133) | def updateModel(self, Target_data):
    method reset_target (line 152) | def reset_target(self):
    method meta_add (line 155) | def meta_add(self, meta_data):
    method resetModel (line 158) | def resetModel(self, Source_data, Target_data):
    method get_train_time (line 162) | def get_train_time(self):
    method get_fit_time (line 165) | def get_fit_time(self):
    method predict (line 169) | def predict(
    method obj_posterior_samples (line 179) | def obj_posterior_samples(self, X, sample_size):
    method update_model (line 193) | def update_model(self, Data: Dict):
    method get_fmin (line 205) | def get_fmin(self):
    method set_XY (line 211) | def set_XY(self, X=None, Y=None):
    method samples (line 229) | def samples(self, gp):
    method posterior_samples_f (line 242) | def posterior_samples_f(self,X, model_id, size=10):
    method posterior_samples (line 261) | def posterior_samples(self, X, model_id, size=10):

FILE: transopt/optimizer/SingleObjOptimizer/TPEOptimizer.py
  class TPEOptimizer (line 11) | class TPEOptimizer(BOBase):
    method __init__ (line 12) | def __init__(self, config:Dict, **kwargs):
    method initial_sample (line 37) | def initial_sample(self):
    method suggest (line 40) | def suggest(self, n_suggestions:Union[None, int] = None) ->List[Dict]:
    method update_model (line 60) | def update_model(self, Data):
    method create_model (line 80) | def create_model(self, X, Y):
    method predict (line 85) | def predict(self, X):
    method random_sample (line 102) | def random_sample(self, num_samples: int) -> List[Dict]:
    method model_reset (line 126) | def model_reset(self):
    method get_fmin (line 129) | def get_fmin(self):
    method posterior_samples (line 135) | def posterior_samples(self, X, model_id, size=10):

FILE: transopt/optimizer/SingleObjOptimizer/VizerOptimizer.py
  class Vizer (line 12) | class Vizer(BOBase):
    method __init__ (line 14) | def __init__(self, config: Dict, **kwargs):
    method model_reset (line 34) | def model_reset(self):
    method initial_sample (line 43) | def initial_sample(self):
    method random_sample (line 46) | def random_sample(self, num_samples: int) -> List[Dict]:
    method suggest (line 70) | def suggest(self, n_suggestions:Union[None, int] = None) ->List[Dict]:
    method meta_update (line 90) | def meta_update(self):
    method meta_add (line 93) | def meta_add(self, Data:List[Dict]):
    method create_model (line 96) | def create_model(self):
    method update_model (line 100) | def update_model(self, Data):
    method MetaFitModel (line 113) | def MetaFitModel(self, metadata):
    method get_train_time (line 125) | def get_train_time(self):
    method get_fit_time (line 128) | def get_fit_time(self):
    method predict (line 132) | def predict(
    method obj_posterior_samples (line 142) | def obj_posterior_samples(self, X, sample_size):
    method get_fmin (line 164) | def get_fmin(self):
    method samples (line 170) | def samples(self, gp):
    method posterior_samples_f (line 184) | def posterior_samples_f(self,X, model_id, size=10):
    method posterior_samples (line 203) | def posterior_samples(self, X, model_id, size=10):

FILE: transopt/optimizer/acquisition_function/ConformalLCB.py
  class ConformalLCB (line 8) | class ConformalLCB(AcquisitionBase):
    method __init__ (line 28) | def __init__(self, model, space, optimizer, config):
    method _compute_acq (line 36) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 48) | def _compute_acq_withGradients(self, x):

FILE: transopt/optimizer/acquisition_function/acf_base.py
  class AcquisitionBase (line 11) | class AcquisitionBase(object):
    method __init__ (line 23) | def __init__(self, cost_withGradients=None, **kwargs):
    method fromDict (line 37) | def fromDict(model, space, optimizer, cost_withGradients, config):
    method link (line 40) | def link(self, model, space):
    method link_model (line 44) | def link_model(self, model):
    method link_space (line 47) | def link_space(self, space):
    method acquisition_function (line 64) | def acquisition_function(self,x):
    method acquisition_function_withGradients (line 74) | def acquisition_function_withGradients(self, x):
    method optimize (line 85) | def optimize(self, duplicate_manager=None):
    method _compute_acq (line 95) | def _compute_acq(self,x):
    method _compute_acq_withGradients (line 99) | def _compute_acq_withGradients(self, x):

FILE: transopt/optimizer/acquisition_function/ei.py
  class AcquisitionEI (line 11) | class AcquisitionEI(AcquisitionBase):
    method __init__ (line 24) | def __init__(self, config):
    method _compute_acq (line 38) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 47) | def _compute_acq_withGradients(self, x):

FILE: transopt/optimizer/acquisition_function/get_acf.py
  function get_acf (line 4) | def get_acf(acf_name, **kwargs):

FILE: transopt/optimizer/acquisition_function/lcb.py
  class AcquisitionLCB (line 10) | class AcquisitionLCB(AcquisitionBase):
    method __init__ (line 30) | def __init__(self, config):
    method _compute_acq (line 37) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 45) | def _compute_acq_withGradients(self, x):

FILE: transopt/optimizer/acquisition_function/model_manage/CMAESBest.py
  class CMAESBest (line 13) | class CMAESBest(AcquisitionBase):
    method __init__ (line 16) | def __init__(self, config):
    method link_space (line 39) | def link_space(self, space):
    method optimize (line 61) | def optimize(self, duplicate_manager=None):
    method _compute_acq (line 71) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 74) | def _compute_acq_withGradients(self, x):
  class EAProblem (line 78) | class EAProblem(Problem):
    method __init__ (line 79) | def __init__(self, space, predict):
    method _evaluate (line 92) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/acquisition_function/model_manage/CMAESGeneration.py
  class CMAESGeneration (line 11) | class CMAESGeneration(AcquisitionBase):
    method __init__ (line 14) | def __init__(self, config):
    method link_space (line 37) | def link_space(self, space):
    method optimize (line 59) | def optimize(self, duplicate_manager=None):
    method _compute_acq (line 70) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 73) | def _compute_acq_withGradients(self, x):
  class EAProblem (line 77) | class EAProblem(Problem):
    method __init__ (line 78) | def __init__(self, space, predict):
    method _evaluate (line 91) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/acquisition_function/model_manage/CMAESPreSelect.py
  class CMAESPreSelect (line 11) | class CMAESPreSelect(AcquisitionBase):
    method __init__ (line 14) | def __init__(self, config):
    method link_space (line 37) | def link_space(self, space):
    method optimize (line 59) | def optimize(self, duplicate_manager=None):
    method _compute_acq (line 78) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 81) | def _compute_acq_withGradients(self, x):
  class EAProblem (line 85) | class EAProblem(Problem):
    method __init__ (line 86) | def __init__(self, space, predict):
    method _evaluate (line 99) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/acquisition_function/model_manage/DEBest.py
  class DEBest (line 11) | class DEBest(AcquisitionBase):
    method __init__ (line 14) | def __init__(self, config):
    method link_space (line 37) | def link_space(self, space):
    method optimize (line 59) | def optimize(self, duplicate_manager=None):
    method _compute_acq (line 69) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 72) | def _compute_acq_withGradients(self, x):
  class EAProblem (line 76) | class EAProblem(Problem):
    method __init__ (line 77) | def __init__(self, space, predict):
    method _evaluate (line 90) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/acquisition_function/model_manage/DEGeneration.py
  class DEGeneration (line 11) | class DEGeneration(AcquisitionBase):
    method __init__ (line 14) | def __init__(self, config):
    method link_space (line 37) | def link_space(self, space):
    method optimize (line 59) | def optimize(self, duplicate_manager=None):
    method _compute_acq (line 70) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 73) | def _compute_acq_withGradients(self, x):
  class EAProblem (line 77) | class EAProblem(Problem):
    method __init__ (line 78) | def __init__(self, space, predict):
    method _evaluate (line 91) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/acquisition_function/model_manage/DEPreSelect.py
  class DEPreSelect (line 11) | class DEPreSelect(AcquisitionBase):
    method __init__ (line 14) | def __init__(self, config):
    method link_space (line 37) | def link_space(self, space):
    method optimize (line 59) | def optimize(self, duplicate_manager=None):
    method _compute_acq (line 78) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 81) | def _compute_acq_withGradients(self, x):
  class EAProblem (line 85) | class EAProblem(Problem):
    method __init__ (line 86) | def __init__(self, space, predict):
    method _evaluate (line 99) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/acquisition_function/model_manage/GABest.py
  class GABest (line 11) | class GABest(AcquisitionBase):
    method __init__ (line 14) | def __init__(self, config):
    method link_space (line 37) | def link_space(self, space):
    method optimize (line 59) | def optimize(self, duplicate_manager=None):
    method _compute_acq (line 69) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 72) | def _compute_acq_withGradients(self, x):
  class EAProblem (line 76) | class EAProblem(Problem):
    method __init__ (line 77) | def __init__(self, space, predict):
    method _evaluate (line 90) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/acquisition_function/model_manage/GAGeneration.py
  class GAGeneration (line 11) | class GAGeneration(AcquisitionBase):
    method __init__ (line 14) | def __init__(self, config):
    method link_space (line 37) | def link_space(self, space):
    method optimize (line 59) | def optimize(self, duplicate_manager=None):
    method _compute_acq (line 70) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 73) | def _compute_acq_withGradients(self, x):
  class EAProblem (line 77) | class EAProblem(Problem):
    method __init__ (line 78) | def __init__(self, space, predict):
    method _evaluate (line 91) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/acquisition_function/model_manage/GAPreSelect.py
  class GAPreSelect (line 11) | class GAPreSelect(AcquisitionBase):
    method __init__ (line 14) | def __init__(self, config):
    method link_space (line 37) | def link_space(self, space):
    method optimize (line 59) | def optimize(self, duplicate_manager=None):
    method _compute_acq (line 78) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 81) | def _compute_acq_withGradients(self, x):
  class EAProblem (line 85) | class EAProblem(Problem):
    method __init__ (line 86) | def __init__(self, space, predict):
    method _evaluate (line 99) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/acquisition_function/model_manage/PSOBest.py
  class PSOBest (line 11) | class PSOBest(AcquisitionBase):
    method __init__ (line 14) | def __init__(self, config):
    method link_space (line 37) | def link_space(self, space):
    method optimize (line 59) | def optimize(self, duplicate_manager=None):
    method _compute_acq (line 69) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 72) | def _compute_acq_withGradients(self, x):
  class EAProblem (line 76) | class EAProblem(Problem):
    method __init__ (line 77) | def __init__(self, space, predict):
    method _evaluate (line 90) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/acquisition_function/model_manage/PSOGeneration.py
  class PSOGeneration (line 11) | class PSOGeneration(AcquisitionBase):
    method __init__ (line 14) | def __init__(self, config):
    method link_space (line 37) | def link_space(self, space):
    method optimize (line 59) | def optimize(self, duplicate_manager=None):
    method _compute_acq (line 70) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 73) | def _compute_acq_withGradients(self, x):
  class EAProblem (line 77) | class EAProblem(Problem):
    method __init__ (line 78) | def __init__(self, space, predict):
    method _evaluate (line 91) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/acquisition_function/model_manage/PSOPreSelect.py
  class PSOPreSelect (line 11) | class PSOPreSelect(AcquisitionBase):
    method __init__ (line 14) | def __init__(self, config):
    method link_space (line 37) | def link_space(self, space):
    method optimize (line 59) | def optimize(self, duplicate_manager=None):
    method _compute_acq (line 78) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 81) | def _compute_acq_withGradients(self, x):
  class EAProblem (line 85) | class EAProblem(Problem):
    method __init__ (line 86) | def __init__(self, space, predict):
    method _evaluate (line 99) | def _evaluate(self, x, out, *args, **kwargs):

FILE: transopt/optimizer/acquisition_function/moeadego.py
  class MOEADEGO (line 14) | class MOEADEGO:
    method __init__ (line 15) | def __init__(self, model, space, optimizer, config):
    method _compute_acq (line 28) | def _compute_acq(self, x):
    method set_model_id (line 36) | def set_model_id(self, idx):
    method optimize (line 38) | def optimize(self, duplicate_manager=None):

FILE: transopt/optimizer/acquisition_function/pi.py
  class AcquisitionPI (line 10) | class AcquisitionPI(AcquisitionBase):
    method __init__ (line 23) | def __init__(self, config):
    method _compute_acq (line 36) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 45) | def _compute_acq_withGradients(self, x):

FILE: transopt/optimizer/acquisition_function/piei.py
  class AcquisitionpiEI (line 10) | class AcquisitionpiEI(AcquisitionBase):
    method __init__ (line 23) | def __init__(self, Model, space, optimizer, cost_withGradients=None, j...
    method _compute_acq (line 35) | def _compute_acq(self, x):
    method _compute_prior (line 45) | def _compute_prior(self, x):
    method _compute_acq_withGradients (line 48) | def _compute_acq_withGradients(self, x):

FILE: transopt/optimizer/acquisition_function/sequential.py
  class Sequential (line 4) | class Sequential(EvaluatorBase):
    method __init__ (line 12) | def __init__(self, acquisition, batch_size=1):
    method compute_batch (line 15) | def compute_batch(self, duplicate_manager=None,context_manager=None):

FILE: transopt/optimizer/acquisition_function/smsego.py
  class SMSEGO (line 13) | class SMSEGO:
    method __init__ (line 14) | def __init__(self, model, space, optimizer, config):
    method _compute_acq (line 21) | def _compute_acq(self, x):
    method optimize (line 39) | def optimize(self, duplicate_manager=None):

FILE: transopt/optimizer/acquisition_function/taf.py
  class AcquisitionTAF (line 12) | class AcquisitionTAF(AcquisitionBase):
    method __init__ (line 25) | def __init__(self, config):
    method _compute_acq (line 39) | def _compute_acq(self, x):
    method _compute_acq_withGradients (line 62) | def _compute_acq_withGradients(self, x):

FILE: transopt/optimizer/construct_optimizer.py
  function ConstructOptimizer (line 9) | def ConstructOptimizer(optimizer_config: dict = None, seed: int = 0) -> BO:
  function ConstructSelector (line 59) | def ConstructSelector(optimizer_config, dict = None, seed: int = 0):

FILE: transopt/optimizer/model/HyperBO.py
  class hyperbo (line 31) | class hyperbo():
    method __init__ (line 32) | def __init__(self, seed = 0):
    method pretrain (line 56) | def pretrain(self, Meta_data, Target_data):
    method retrain (line 86) | def retrain(self, Target_data):
    method predict (line 107) | def predict(self, X, subset_data_id:Union[int, str] = 0):

FILE: transopt/optimizer/model/bohb.py
  class KDEMultivariate (line 9) | class KDEMultivariate(sm.nonparametric.KDEMultivariate):
    method __init__ (line 10) | def __init__(self, configurations):
  class Log (line 19) | class Log():
    method __init__ (line 20) | def __init__(self, size):
    method __getitem__ (line 25) | def __getitem__(self, index):
    method __setitem__ (line 28) | def __setitem__(self, index, value):
    method __repr__ (line 31) | def __repr__(self):
  class BOHB (line 47) | class BOHB:
    method __init__ (line 48) | def __init__(self, configspace, evaluate, max_budget, min_budget,
    method optimize (line 71) | def optimize(self):
    method get_sample (line 123) | def get_sample(self):

FILE: transopt/optimizer/model/deepkernel.py
  class Metric (line 30) | class Metric(object):
    method __init__ (line 31) | def __init__(self,prefix='train: '):
    method update (line 35) | def update(self,loss,noise,mse):
    method reset (line 40) | def reset(self,):
    method report (line 45) | def report(self):
    method get (line 50) | def get(self):
  function totorch (line 56) | def totorch(x,device):
  class MLP (line 60) | class MLP(nn.Module):
    method __init__ (line 61) | def __init__(self, input_size, hidden_size=[32,32,32,32], dropout=0.0):
    method forward (line 70) | def forward(self,x):
  class ExactGPLayer (line 80) | class ExactGPLayer(gpytorch.models.ExactGP):
    method __init__ (line 81) | def __init__(self, train_x, train_y, likelihood,config,dims ):
    method forward (line 92) | def forward(self, x):
  class DeepKernelGP (line 99) | class DeepKernelGP(nn.Module):
    method __init__ (line 100) | def __init__(self, config = {}):
    method get_model_likelihood_mll (line 126) | def get_model_likelihood_mll(self, train_size):
    method fit (line 139) | def fit(self,
    method load_checkpoint (line 192) | def load_checkpoint(self, checkpoint):
    method predict (line 199) | def predict(self, X_pen):
    method continuous_maximization (line 219) | def continuous_maximization( self, dim, bounds, acqf):
    method get_fmin (line 225) | def get_fmin(self):

FILE: transopt/optimizer/model/dyhpo.py
  class FeatureExtractor (line 15) | class FeatureExtractor(nn.Module):
    method __init__ (line 19) | def __init__(self, configuration):
    method forward (line 58) | def forward(self, x, budgets, learning_curves):
  class GPRegressionModel (line 91) | class GPRegressionModel(gpytorch.models.ExactGP):
    method __init__ (line 95) | def __init__(
    method forward (line 114) | def forward(self, x):
  class DyHPO (line 122) | class DyHPO:
    method __init__ (line 126) | def __init__(
    method restart_optimization (line 195) | def restart_optimization(self):
    method get_model_likelihood_mll (line 210) | def get_model_likelihood_mll(
    method train_pipeline (line 233) | def train_pipeline(self, data: Dict[str, torch.Tensor], load_checkpoin...
    method predict_pipeline (line 334) | def predict_pipeline(
    method load_checkpoint (line 374) | def load_checkpoint(self):
    method save_checkpoint (line 383) | def save_checkpoint(self, state: Dict =None):
    method get_state (line 404) | def get_state(self) -> Dict[str, Dict]:

FILE: transopt/optimizer/model/get_model.py
  function get_model (line 5) | def get_model(model_name, **kwargs):

FILE: transopt/optimizer/model/gp.py
  class GP (line 15) | class GP(Model):
    method __init__ (line 17) | def __init__(
    method kernel (line 44) | def kernel(self):
    method noise_variance (line 49) | def noise_variance(self):
    method kernel (line 54) | def kernel(self, kernel: Kern):
    method meta_fit (line 73) | def meta_fit(
    method fit (line 81) | def fit(
    method predict (line 126) | def predict(
    method _raw_predict (line 136) | def _raw_predict(
    method predict_posterior_mean (line 164) | def predict_posterior_mean(self, X) -> np.ndarray:
    method predict_posterior_covariance (line 188) | def predict_posterior_covariance(self, x1, x2) -> np.ndarray:
    method compute_kernel (line 215) | def compute_kernel(self, x1, x2) -> np.ndarray:
    method compute_kernel_diagonal (line 232) | def compute_kernel_diagonal(self, X) -> np.ndarray:
    method sample (line 249) | def sample(
    method get_fmin (line 271) | def get_fmin(self):

FILE: transopt/optimizer/model/hebo.py
  class HEBO (line 41) | class HEBO(AbstractOptimizer):
    method __init__ (line 45) | def __init__(self, space, model_name = 'gpy', rand_sample = None, acq_...
    method quasi_sample (line 64) | def quasi_sample(self, n, fix_input = None):
    method model_config (line 79) | def model_config(self):
    method get_best_id (line 112) | def get_best_id(self, fix_input : dict = None) -> int:
    method suggest (line 128) | def suggest(self, n_suggestions=1, fix_input = None):
    method check_unique (line 199) | def check_unique(self, rec : pd.DataFrame) -> [bool]:
    method observe (line 202) | def observe(self, X, y):
    method best_x (line 221) | def best_x(self)->pd.DataFrame:
    method best_y (line 228) | def best_y(self)->float:

FILE: transopt/optimizer/model/mhgp.py
  class MHGP (line 27) | class MHGP(Model):
    method __init__ (line 38) | def __init__(self,
    method _compute_residuals (line 62) | def _compute_residuals(self, X: np.ndarray, Y: np.ndarray) -> np.ndarray:
    method _update_meta_data (line 89) | def _update_meta_data(self, *gps: GP):
    method _meta_fit_single_gp (line 94) | def _meta_fit_single_gp(
    method meta_fit (line 124) | def meta_fit(
    method fit (line 156) | def fit(
    method predict (line 184) | def predict(
    method predict_posterior_mean (line 202) | def predict_posterior_mean(self, X: np.ndarray, idx: int = None) -> np...
    method predict_posterior_covariance (line 232) | def predict_posterior_covariance(self, x1: np.ndarray, x2: np.ndarray)...
    method get_fmin (line 244) | def get_fmin(self):

FILE: transopt/optimizer/model/mlp.py
  function compute_irm_penalty (line 21) | def compute_irm_penalty(losses, dummy):
  class Net (line 26) | class Net(nn.Module):
    method __init__ (line 27) | def __init__(self, input_dim, dropout_rate=0.3):
    method forward (line 35) | def forward(self, x):
  class MLP (line 47) | class MLP(Model):
    method __init__ (line 48) | def __init__(self, config):
    method meta_fit (line 58) | def meta_fit(
    method fit (line 66) | def fit(
    method predict (line 150) | def predict(
    method get_fmin (line 163) | def get_fmin(self):
    method save_plots (line 168) | def save_plots(self, train_losses, val_losses, X_val, y_val, output_di...

FILE: transopt/optimizer/model/model_base.py
  class Model (line 6) | class Model(ABC):
    method __init__ (line 9) | def __init__(self):
    method X (line 15) | def X(self) -> np.ndarray:
    method y (line 20) | def y(self) -> np.ndarray:
    method meta_fit (line 25) | def meta_fit(self, metadata, **kwargs):
    method fit (line 37) | def fit(self, X, Y, **kwargs):
    method predict (line 48) | def predict(self, X) -> (np.ndarray, np.ndarray):

FILE: transopt/optimizer/model/moeadego.py
  class MoeadEGO (line 12) | class MoeadEGO(Model):
    method __init__ (line 13) | def __init__(
    method fit (line 35) | def fit(self, X, Y):
    method predict (line 43) | def predict(self, X, full_cov=False):
    method _create_model (line 46) | def _create_model(self, X, Y):
    method _update_model (line 64) | def _update_model(self, X, Y):
    method _make_prediction (line 81) | def _make_prediction(self, X, full_cov=False):
    method _make_prediction_by_id (line 98) | def _make_prediction_by_id(self, X, idx, full_cov=False):

FILE: transopt/optimizer/model/mtgp.py
  class MTGP (line 29) | class MTGP(GP):
    method __init__ (line 58) | def __init__(
    method meta_fit (line 79) | def meta_fit(
    method fit (line 95) | def fit(
    method _raw_predict (line 163) | def _raw_predict(

FILE: transopt/optimizer/model/neuralprocess.py
  class NeuralProcess (line 9) | class NeuralProcess(Model):
    method __init__ (line 10) | def __init__(self):

FILE: transopt/optimizer/model/parego.py
  class ParEGO (line 10) | class ParEGO(Model):
    method __init__ (line 11) | def __init__(self, seed=0, normalize=True, **options):
    method fit (line 21) | def fit(self, X, Y):
    method predict (line 29) | def predict(self, X, full_cov=False):
    method _scalarization (line 32) | def _scalarization(self, Y: np.ndarray, rho):
    method _create_model (line 43) | def _create_model(self, X, Y):
    method _update_model (line 50) | def _update_model(self, X, Y):
    method _make_prediction (line 63) | def _make_prediction(self, X, full_cov=False):

FILE: transopt/optimizer/model/pr.py
  class PR (line 12) | class PR(Model):
    method __init__ (line 13) | def __init__(
    method meta_fit (line 29) | def meta_fit(
    method fit (line 37) | def fit(
    method predict (line 63) | def predict(

FILE: transopt/optimizer/model/rbfn.py
  class RegressionDataset (line 16) | class RegressionDataset(Dataset):
    method __init__ (line 18) | def __init__(self, inputs, targets):
    method __len__ (line 22) | def __len__(self):
    method __getitem__ (line 25) | def __getitem__(self, index):
  class RbfNet (line 31) | class RbfNet(nn.Module):
    method __init__ (line 32) | def __init__(self, centers, beta):
    method kernel_fun (line 40) | def kernel_fun(self, batches):
    method forward (line 47) | def forward(self, x):
  class rbfn (line 53) | class rbfn(object):
    method __init__ (line 54) | def __init__(self, dataset, max_epoch=30, batch_size=5, lr=0.01, num_c...
    method train (line 78) | def train(self):
    method predict (line 98) | def predict(self, x):
    method cluster (line 105) | def cluster(self):
    method calculate_beta (line 111) | def calculate_beta(self):
    method update_dataset (line 120) | def update_dataset(self, dataset):
  class RBFN (line 130) | class RBFN(Model):
    method __init__ (line 131) | def __init__(
    method meta_fit (line 155) | def meta_fit(
    method fit (line 163) | def fit(
    method predict (line 199) | def predict(

FILE: transopt/optimizer/model/rf.py
  class RF (line 27) | class RF(Model):
    method __init__ (line 28) | def __init__(
    method meta_fit (line 55) | def meta_fit(
    method fit (line 63) | def fit(
    method predict (line 82) | def predict(
    method _raw_predict (line 89) | def _raw_predict(
    method _raw_predic_var (line 104) | def _raw_predic_var(self, X, trees, predictions, min_variance=0.0):
    method sample (line 121) | def sample(
    method get_fmin (line 143) | def get_fmin(self):

FILE: transopt/optimizer/model/rgpe.py
  function roll_col (line 14) | def roll_col(X: np.ndarray, shift: int) -> np.ndarray:
  function compute_ranking_loss (line 21) | def compute_ranking_loss(
  class RGPE (line 48) | class RGPE(Model):
    method __init__ (line 49) | def __init__(
    method _meta_fit_single_gp (line 75) | def _meta_fit_single_gp(
    method meta_fit (line 103) | def meta_fit(self,
    method fit (line 132) | def fit(self,
    method predict (line 157) | def predict(
    method _calculate_weights (line 184) | def _calculate_weights(self, alpha: float = 0.0):
    method _calculate_weights_with_no_observations (line 324) | def _calculate_weights_with_no_observations(self):
    method _calculate_weights_with_one_observation (line 346) | def _calculate_weights_with_one_observation(self):
    method _update_meta_data (line 398) | def _update_meta_data(self, *gps: GPy.models.GPRegression):
    method meta_update (line 403) | def meta_update(self):
    method set_XY (line 406) | def set_XY(self, Data:Dict):
    method print_Weights (line 410) | def print_Weights(self):
    method get_Weights (line 414) | def get_Weights(self):
    method loss (line 420) | def loss(self, task_uid: int) -> np.ndarray:
    method posterior_samples_f (line 429) | def posterior_samples_f(self,X, size=10, **predict_kwargs):
    method posterior_samples (line 451) | def posterior_samples(self, X, size=10, Y_metadata=None, likelihood=No...
    method get_fmin (line 476) | def get_fmin(self):

FILE: transopt/optimizer/model/sgpt.py
  function roll_col (line 13) | def roll_col(X: np.ndarray, shift: int) -> np.ndarray:
  class SGPT (line 20) | class SGPT(Model):
    method __init__ (line 21) | def __init__(
    method _meta_fit_single_gp (line 49) | def _meta_fit_single_gp(
    method meta_fit (line 77) | def meta_fit(self,
    method fit (line 106) | def fit(self,
    method predict (line 132) | def predict(self, X, return_full: bool = False, with_noise: bool = Fal...
    method Epanechnikov_kernel (line 155) | def Epanechnikov_kernel(self, X1, X2):
    method _calculate_weights (line 164) | def _calculate_weights(self, alpha: float = 0.0):
    method posterior_samples_f (line 215) | def posterior_samples_f(self,X, size=10, **predict_kwargs):
    method posterior_samples (line 237) | def posterior_samples(self, X, size=10, Y_metadata=None, likelihood=No...
    method get_fmin (line 262) | def get_fmin(self):

FILE: transopt/optimizer/model/smsego.py
  class SMSEGO (line 10) | class SMSEGO(Model):
    method __init__ (line 11) | def __init__(self, seed=0, normalize=True, **options):
    method fit (line 21) | def fit(self, X, Y):
    method predict (line 29) | def predict(self, X, full_cov=False):
    method _create_model (line 32) | def _create_model(self, X, Y):
    method _update_model (line 40) | def _update_model(self, X, Y):
    method _make_prediction (line 53) | def _make_prediction(self, X, full_cov=False):

FILE: transopt/optimizer/model/utils.py
  function is_pd (line 10) | def is_pd(a: np.ndarray) -> bool:
  function nearest_pd (line 27) | def nearest_pd(a: np.ndarray) -> np.ndarray:
  function compute_cholesky (line 51) | def compute_cholesky(matrix: np.ndarray) -> np.ndarray:
  class FixedKernel (line 83) | class FixedKernel(Fixed):
    method __init__ (line 89) | def __init__(
    method to_dict (line 113) | def to_dict(self) -> dict:
  function compute_alpha (line 123) | def compute_alpha(model: "GP", x) -> np.ndarray:
  class CrossTaskKernel (line 149) | class CrossTaskKernel(BasisFuncKernel):
    method __init__ (line 152) | def __init__(
    method _phi (line 169) | def _phi(self, X: np.ndarray) -> np.ndarray:

FILE: transopt/optimizer/normalizer/normalizer_base.py
  class NormalizerBase (line 5) | class NormalizerBase(ABC):
    method __init__ (line 6) | def __init__(self, config):
    method fit (line 9) | def fit(self, X, Y):
    method transform (line 12) | def transform(self, X = None, Y = None):
    method inverse_transform (line 15) | def inverse_transform(self, X = None, Y = None):

FILE: transopt/optimizer/normalizer/standerd.py
  class Standard_normalizer (line 29) | class Standard_normalizer(NormalizerBase):
    method __init__ (line 30) | def __init__(self, config, metadata =  None, metadata_info = None):
    method fit (line 35) | def fit(self, X, Y):
    method transform (line 38) | def transform(self, X = None, Y = None):
    method inverse_transform (line 45) | def inverse_transform(self, X = None, Y = None):

FILE: transopt/optimizer/optimizer_base/EvoOptimizerBase.py
  class EVOBase (line 13) | class EVOBase(OptimizerBase):
    method __init__ (line 17) | def __init__(self, config):

FILE: transopt/optimizer/optimizer_base/base.py
  class OptimizerBase (line 4) | class OptimizerBase(abc.ABC, metaclass=abc.ABCMeta):
    method __init__ (line 11) | def __init__(self, config, **kwargs):
    method suggest (line 27) | def suggest(self, n_suggestions:Union[None, int] = None)->List[Dict]:
    method observe (line 45) | def observe(self, input_vectors: Union[List[Dict], Dict], output_value...

FILE: transopt/optimizer/optimizer_base/bo.py
  class BO (line 17) | class BO(OptimizerBase):
    method __init__ (line 22) | def __init__(self, Refiner, Sampler, ACF, Pretrain, Model, Normalizer,...
    method link_task (line 42) | def link_task(self, task_name:str, search_space: SearchSpace):
    method search_space_refine (line 51) | def search_space_refine(self, metadata = None, metadata_info = None):
    method sample_initial_set (line 57) | def sample_initial_set(self, metadata = None, metadata_info = None):
    method pretrain (line 60) | def pretrain(self, metadata = None, metadata_info = None):
    method meta_fit (line 66) | def meta_fit(self, metadata = None, metadata_info = None):
    method fit (line 77) | def fit(self):
    method suggest (line 85) | def suggest(self):
    method observe (line 95) | def observe(self, X: np.ndarray, Y: List[Dict]) -> None:

FILE: transopt/optimizer/pretrain/deepkernelpretrain.py
  class Metric (line 20) | class Metric(object):
    method __init__ (line 21) | def __init__(self,prefix='train: '):
    method update (line 25) | def update(self,loss,noise,mse):
    method reset (line 30) | def reset(self,):
    method report (line 35) | def report(self):
    method get (line 40) | def get(self):
  function totorch (line 46) | def totorch(x,device):
  class MLP (line 50) | class MLP(nn.Module):
    method __init__ (line 51) | def __init__(self, input_size, hidden_size=[32,32,32,32], dropout=0.0):
    method forward (line 60) | def forward(self,x):
  class ExactGPLayer (line 70) | class ExactGPLayer(gpytorch.models.ExactGP):
    method __init__ (line 71) | def __init__(self, train_x, train_y, likelihood,config,dims ):
    method forward (line 82) | def forward(self, x):
  class DeepKernelPretrain (line 90) | class DeepKernelPretrain(nn.Module):
    method __init__ (line 91) | def __init__(self, config = {}):
    method set_data (line 118) | def set_data(self, metadata, metadata_info= None):
    method get_tasks (line 134) | def get_tasks(self,):
    method get_model_likelihood_mll (line 138) | def get_model_likelihood_mll(self, train_size):
    method epoch_end (line 150) | def epoch_end(self):
    method meta_train (line 154) | def meta_train(self, epochs = 50000, lr = 0.0001):
    method train_loop (line 164) | def train_loop(self, epoch, optimizer, scheduler=None):
    method test_loop (line 197) | def test_loop(self, task, train):
    method get_batch (line 215) | def get_batch(self,task):
    method get_support_and_queries (line 229) | def get_support_and_queries(self,task, train=False):
    method save_checkpoint (line 247) | def save_checkpoint(self, checkpoint):
    method load_checkpoint (line 254) | def load_checkpoint(self, checkpoint):

FILE: transopt/optimizer/pretrain/get_pretrain.py
  function get_pretrain (line 5) | def get_pretrain(pretrain_name, **kwargs):

FILE: transopt/optimizer/pretrain/hyper_bo.py
  class HyperBOPretrain (line 7) | class HyperBOPretrain(PretrainBase):
    method __init__ (line 8) | def __init__(self, config) -> None:

FILE: transopt/optimizer/pretrain/pretrain_base.py
  class PretrainBase (line 3) | class PretrainBase:
    method __init__ (line 4) | def __init__(self) -> None:

FILE: transopt/optimizer/refiner/box.py
  class BoxRefiner (line 5) | class BoxRefiner(RefinerBase):
    method __init__ (line 6) | def __init__(self, config) -> None:

FILE: transopt/optimizer/refiner/ellipse.py
  class EllipseRefiner (line 5) | class EllipseRefiner(RefinerBase):
    method __init__ (line 6) | def __init__(self, config) -> None:

FILE: transopt/optimizer/refiner/get_refiner.py
  function get_refiner (line 5) | def get_refiner(refiner_name, **kwargs):

FILE: transopt/optimizer/refiner/prune.py
  class Prune (line 6) | class Prune(RefinerBase):
    method __init__ (line 7) | def __init__(self, config) -> None:
    method refine (line 10) | def refine(self, search_space, metadata=None):
    method check_metadata_avaliable (line 14) | def check_metadata_avaliable(self, metadata):

FILE: transopt/optimizer/refiner/refiner_base.py
  class RefinerBase (line 4) | class RefinerBase:
    method __init__ (line 5) | def __init__(self, config) -> None:
    method refine (line 8) | def refine(self, search_space, metadata=None):
    method check_metadata_avaliable (line 12) | def check_metadata_avaliable(self, metadata):

FILE: transopt/optimizer/sampler/get_sampler.py
  function get_sampler (line 5) | def get_sampler(sampler_name, **kwargs):

FILE: transopt/optimizer/sampler/grid.py
  class GridSampler (line 6) | class GridSampler(Sampler):
    method generate_grid_for_variable (line 7) | def generate_grid_for_variable(self, var_range, is_discrete, steps):
    method sample (line 18) | def sample(self, search_space, steps=5, metadata=None):

FILE: transopt/optimizer/sampler/lhs.py
  class LatinHypercubeSampler (line 10) | class LatinHypercubeSampler(Sampler):
    method sample (line 11) | def sample(self, search_space:SearchSpace, metadata = None):

FILE: transopt/optimizer/sampler/lhs_BAK.py
  function lhs (line 11) | def lhs(d, samples=None, criterion=None, iterations=5, correlation_matri...
  function _lhsclassic (line 63) | def _lhsclassic(d, samples):
  function _lhscentered (line 69) | def _lhscentered(d, samples):
  function _lhsmaximin (line 76) | def _lhsmaximin(d, samples, iterations, lhstype):
  function _lhscorrelate (line 96) | def _lhscorrelate(d, samples, iterations):
  function _lhsmu (line 114) | def _lhsmu(d, samples=None, corr=None, M=5):

FILE: transopt/optimizer/sampler/random.py
  class RandomSampler (line 7) | class RandomSampler(Sampler):
    method sample (line 8) | def sample(self, search_space, metadata = None):

FILE: transopt/optimizer/sampler/sampler_base.py
  class Sampler (line 2) | class Sampler:
    method __init__ (line 3) | def __init__(self, n_samples, config) -> None:
    method sample (line 7) | def sample(self, search_space, metadata=None):
    method change_n_samples (line 10) | def change_n_samples(self, n_samples):
    method check_metadata_avaliable (line 13) | def check_metadata_avaliable(self, metadata):

FILE: transopt/optimizer/sampler/sobel.py
  class SobolSampler (line 8) | class SobolSampler(Sampler):
    method sample (line 9) | def sample(self, search_space, metadata = None):

FILE: transopt/optimizer/selector/fuzzy_selector.py
  class FuzzySelector (line 6) | class FuzzySelector(SelectorBase):
    method __init__ (line 7) | def __init__(self, config):
    method fetch_data (line 10) | def fetch_data(self, tasks_info):

FILE: transopt/optimizer/selector/lsh_selector.py
  class LSHSelector (line 5) | class LSHSelector(SelectorBase):
    method __init__ (line 6) | def __init__(self, config):
    method fetch_data (line 10) | def fetch_data(self, tasks_info):

FILE: transopt/optimizer/selector/selector_base.py
  class SelectorBase (line 6) | class SelectorBase:
    method __init__ (line 7) | def __init__(self, config):
    method fetch_data (line 13) | def fetch_data(self, tasks_info):

FILE: transopt/remote/experiment_client.py
  class ExperimentClient (line 5) | class ExperimentClient:
    method __init__ (line 6) | def __init__(self, server_url, timeout=10):
    method _handle_response (line 10) | def _handle_response(self, response):
    method start_experiment (line 17) | def start_experiment(self, params):
    method get_experiment_result (line 27) | def get_experiment_result(self, task_id):
    method wait_for_result (line 39) | def wait_for_result(self, task_id, poll_interval=2):

FILE: transopt/remote/experiment_server.py
  class ExperimentServer (line 5) | class ExperimentServer:
    method __init__ (line 6) | def __init__(self, task_handler):
    method _validate_params (line 11) | def _validate_params(self, params):
    method _setup_routes (line 15) | def _setup_routes(self):
    method run (line 54) | def run(self, host="0.0.0.0", port=5001):

FILE: transopt/remote/experiment_tasks.py
  class DebugTask (line 11) | class DebugTask(Task):
    method on_failure (line 12) | def on_failure(self, exc, task_id, args, kwargs, einfo):
    method on_success (line 15) | def on_success(self, retval, task_id, args, kwargs):
    method after_return (line 18) | def after_return(self, status, retval, task_id, args, kwargs, einfo):
  class ExperimentTaskHandler (line 22) | class ExperimentTaskHandler:
    method __init__ (line 23) | def __init__(self):
    method run_experiment (line 27) | def run_experiment(self, params):
    method start_experiment (line 57) | def start_experiment(self, params):

FILE: transopt/space/fidelity_space.py
  class FidelitySpace (line 7) | class FidelitySpace:
    method __init__ (line 8) | def __init__(self, fidelity_variables):
    method fidelity_names (line 12) | def fidelity_names(self):
    method get_fidelity_range (line 16) | def get_fidelity_range(self):

FILE: transopt/space/search_space.py
  class SearchSpace (line 7) | class SearchSpace:
    method __init__ (line 8) | def __init__(self, variables):
    method __getitem__ (line 23) | def __getitem__(self, item):
    method __contains__ (line 27) | def __contains__(self, item):
    method get_design_variables (line 30) | def get_design_variables(self):
    method get_design_variable (line 33) | def get_design_variable(self, name):
    method get_hyperparameter_names (line 36) | def get_hyperparameter_names(self):
    method get_hyperparameter_types (line 39) | def get_hyperparameter_types(self):
    method map_to_design_space (line 43) | def map_to_design_space(self, values: np.ndarray) -> dict:
    method map_from_design_space (line 64) | def map_from_design_space(self, values_dict: dict) -> np.ndarray:
    method update_range (line 81) | def update_range(self, name, new_range: tuple):

FILE: transopt/space/variable.py
  class Variable (line 4) | class Variable:
    method __init__ (line 5) | def __init__(self, name, type_):
    method search_space_range (line 10) | def search_space_range(self):
    method map2design (line 13) | def map2design(self, value):
    method map2search (line 17) | def map2search(self, value):
  class Continuous (line 22) | class Continuous(Variable):
    method __init__ (line 23) | def __init__(self, name, range_):
    method search_space_range (line 30) | def search_space_range(self):
    method map2design (line 33) | def map2design(self, value):
    method map2search (line 36) | def map2search(self, value):
  class Categorical (line 40) | class Categorical(Variable):
    method __init__ (line 41) | def __init__(self, name, categories):
    method search_space_range (line 49) | def search_space_range(self):
    method map2design (line 52) | def map2design(self, value):
    method map2search (line 55) | def map2search(self, value):
  class Integer (line 60) | class Integer(Variable):
    method __init__ (line 61) | def __init__(self, name, range_):
    method search_space_range (line 68) | def search_space_range(self):
    method map2design (line 71) | def map2design(self, value):
    method map2search (line 75) | def map2search(self, value):
  class LargeInteger (line 78) | class LargeInteger(Variable):
    method __init__ (line 79) | def __init__(self, name, range_):
    method search_space_range (line 85) | def search_space_range(self):
    method map2design (line 91) | def map2design(self, value):
    method map2search (line 95) | def map2search(self, value):
  class ExponentialInteger (line 99) | class ExponentialInteger(Variable):
    method __init__ (line 100) | def __init__(self, name, range_):
    method search_space_range (line 109) | def search_space_range(self):
    method map2design (line 114) | def map2design(self, value):
    method map2search (line 117) | def map2search(self, value):
  class LogContinuous (line 121) | class LogContinuous(Variable):
    method __init__ (line 122) | def __init__(self, name, range_):
    method search_space_range (line 129) | def search_space_range(self):
    method map2design (line 132) | def map2design(self, value):
    method map2search (line 135) | def map2search(self, value):

FILE: transopt/utils/Initialization.py
  function InitData (line 9) | def InitData(Init_method, KB, Init, Xdim, Dty, **kwargs):

FILE: transopt/utils/Kernel.py
  function construct_multi_objective_kernel (line 10) | def construct_multi_objective_kernel(input_dim, output_dim, base_kernel=...

FILE: transopt/utils/Normalization.py
  function get_normalizer (line 6) | def get_normalizer(name):
  function normalize_with_power_transform (line 20) | def normalize_with_power_transform(data: Union[np.ndarray, list], mean=N...
  function rank_normalize_with_power_transform (line 64) | def rank_normalize_with_power_transform(data: Union[np.ndarray, list]):
  function normalize (line 119) | def normalize(data:Union[List, Dict, np.ndarray], mean=None, std=None):

FILE: transopt/utils/Prior.py
  class Prior (line 13) | class Prior(object):
    method __new__ (line 16) | def __new__(cls, *args, **kwargs):
    method pdf (line 25) | def pdf(self, x):
    method plot (line 28) | def plot(self):
    method __repr__ (line 36) | def __repr__(self, *args, **kwargs):
  class Gaussian (line 40) | class Gaussian(Prior):
    method __new__ (line 53) | def __new__(cls, mu=0, sigma=1):  # Singleton:
    method __init__ (line 67) | def __init__(self, mu, sigma):
    method __str__ (line 73) | def __str__(self):
    method lnpdf (line 76) | def lnpdf(self, x):
    method lnpdf_grad (line 79) | def lnpdf_grad(self, x):
    method rvs (line 82) | def rvs(self, n):
    method getstate (line 85) | def getstate(self):
    method setstate (line 88) | def setstate(self, state):
  class Uniform (line 94) | class Uniform(Prior):
    method __new__ (line 97) | def __new__(cls, lower=0, upper=1):  # Singleton:
    method __init__ (line 111) | def __init__(self, lower, upper):
    method __str__ (line 122) | def __str__(self):
    method lnpdf (line 125) | def lnpdf(self, x):
    method lnpdf_grad (line 129) | def lnpdf_grad(self, x):
    method rvs (line 132) | def rvs(self, n):
  class LogGaussian (line 142) | class LogGaussian(Gaussian):
    method __new__ (line 155) | def __new__(cls, mu=0, sigma=1, name=''):  # Singleton:
    method __init__ (line 169) | def __init__(self, mu, sigma, name):
    method __str__ (line 176) | def __str__(self):
    method lnpdf (line 179) | def lnpdf(self, x):
    method lnpdf_grad (line 182) | def lnpdf_grad(self, x):
    method rvs (line 185) | def rvs(self, n):
    method getstate (line 188) | def getstate(self):
    method setstate (line 191) | def setstate(self, state):
  class MultivariateGaussian (line 197) | class MultivariateGaussian(Prior):
    method __new__ (line 210) | def __new__(cls, mu=0, var=1):  # Singleton:
    method __init__ (line 226) | def __init__(self, mu, var):
    method __str__ (line 237) | def __str__(self):
    method summary (line 240) | def summary(self):
    method pdf (line 243) | def pdf(self, x):
    method lnpdf (line 247) | def lnpdf(self, x):
    method lnpdf_grad (line 252) | def lnpdf_grad(self, x):
    method rvs (line 257) | def rvs(self, n):
    method plot (line 260) | def plot(self):
    method __getstate__ (line 268) | def __getstate__(self):
    method __setstate__ (line 271) | def __setstate__(self, state):
  function gamma_from_EV (line 282) | def gamma_from_EV(E, V):
  class Gamma (line 287) | class Gamma(Prior):
    method __new__ (line 300) | def __new__(cls, a=1, b=.5, name = ''):  # Singleton:
    method a (line 315) | def a(self):
    method b (line 319) | def b(self):
    method __init__ (line 322) | def __init__(self, a, b, name=''):
    method __str__ (line 328) | def __str__(self):
    method summary (line 331) | def summary(self):
    method lnpdf (line 342) | def lnpdf(self, x):
    method lnpdf_grad (line 345) | def lnpdf_grad(self, x):
    method rvs (line 348) | def rvs(self, n):
    method getstate (line 352) | def getstate(self):
    method update (line 355) | def update(self, value):
    method from_EV (line 360) | def from_EV(E, V):
    method __getstate__ (line 372) | def __getstate__(self):
    method __setstate__ (line 375) | def __setstate__(self, state):
  class InverseGamma (line 380) | class InverseGamma(Gamma):
    method __str__ (line 393) | def __str__(self):
    method summary (line 396) | def summary(self):
    method from_EV (line 400) | def from_EV(E, V):
    method lnpdf (line 403) | def lnpdf(self, x):
    method lnpdf_grad (line 406) | def lnpdf_grad(self, x):
    method rvs (line 409) | def rvs(self, n):
  class DGPLVM_KFDA (line 412) | class DGPLVM_KFDA(Prior):
    method __init__ (line 436) | def __init__(self, lambdaa, sigma2, lbl, kern, x_shape):
    method get_class_label (line 449) | def get_class_label(self, y):
    method compute_cls (line 457) | def compute_cls(self, x):
    method x_reduced (line 470) | def x_reduced(self, cls):
    method compute_lst_ni (line 476) | def compute_lst_ni(self):
    method compute_a (line 490) | def compute_a(self, lst_ni):
    method compute_A (line 503) | def compute_A(self, lst_ni):
    method lnpdf (line 513) | def lnpdf(self, x):
    method lnpdf_grad (line 524) | def lnpdf_grad(self, x):
    method rvs (line 536) | def rvs(self, n):
    method __str__ (line 539) | def __str__(self):
    method __getstate___ (line 542) | def __getstate___(self):
    method __setstate__ (line 545) | def __setstate__(self, state):
  class DGPLVM (line 559) | class DGPLVM(Prior):
    method __new__ (line 570) | def __new__(cls, sigma2, lbl, x_shape):
    method __init__ (line 573) | def __init__(self, sigma2, lbl, x_shape):
    method get_class_label (line 582) | def get_class_label(self, y):
    method compute_cls (line 590) | def compute_cls(self, x):
    method compute_Mi (line 601) | def compute_Mi(self, cls):
    method compute_indices (line 610) | def compute_indices(self, x):
    method compute_listIndices (line 621) | def compute_listIndices(self, data_idx):
    method compute_Sb (line 637) | def compute_Sb(self, cls, M_i, M_0):
    method compute_Sw (line 646) | def compute_Sw(self, cls, M_i):
    method compute_sig_beta_Bi (line 658) | def compute_sig_beta_Bi(self, data_idx, M_i, M_0, lst_idx_all):
    method compute_wj (line 681) | def compute_wj(self, data_idx, M_i):
    method compute_sig_alpha_W (line 692) | def compute_sig_alpha_W(self, data_idx, lst_idx_all, W_i):
    method lnpdf (line 709) | def lnpdf(self, x):
    method lnpdf_grad (line 723) | def lnpdf_grad(self, x):
    method rvs (line 761) | def rvs(self, n):
    method __str__ (line 764) | def __str__(self):
  class DGPLVM_Lamda (line 773) | class DGPLVM_Lamda(Prior, Parameterized):
    method __init__ (line 794) | def __init__(self, sigma2, lbl, x_shape, lamda, name='DP_prior'):
    method get_class_label (line 807) | def get_class_label(self, y):
    method compute_cls (line 815) | def compute_cls(self, x):
    method compute_Mi (line 826) | def compute_Mi(self, cls):
    method compute_indices (line 835) | def compute_indices(self, x):
    method compute_listIndices (line 846) | def compute_listIndices(self, data_idx):
    method compute_Sb (line 862) | def compute_Sb(self, cls, M_i, M_0):
    method compute_Sw (line 871) | def compute_Sw(self, cls, M_i):
    method compute_sig_beta_Bi (line 883) | def compute_sig_beta_Bi(self, data_idx, M_i, M_0, lst_idx_all):
    method compute_wj (line 906) | def compute_wj(self, data_idx, M_i):
    method compute_sig_alpha_W (line 917) | def compute_sig_alpha_W(self, data_idx, lst_idx_all, W_i):
    method lnpdf (line 934) | def lnpdf(self, x):
    method lnpdf_grad (line 955) | def lnpdf_grad(self, x):
    method rvs (line 1010) | def rvs(self, n):
    method __str__ (line 1013) | def __str__(self):
  class DGPLVM_T (line 1018) | class DGPLVM_T(Prior):
    method __init__ (line 1039) | def __init__(self, sigma2, lbl, x_shape, vec):
    method get_class_label (line 1050) | def get_class_label(self, y):
    method compute_cls (line 1058) | def compute_cls(self, x):
    method compute_Mi (line 1069) | def compute_Mi(self, cls):
    method compute_indices (line 1079) | def compute_indices(self, x):
    method compute_listIndices (line 1090) | def compute_listIndices(self, data_idx):
    method compute_Sb (line 1106) | def compute_Sb(self, cls, M_i, M_0):
    method compute_Sw (line 1115) | def compute_Sw(self, cls, M_i):
    method compute_sig_beta_Bi (line 1127) | def compute_sig_beta_Bi(self, data_idx, M_i, M_0, lst_idx_all):
    method compute_wj (line 1150) | def compute_wj(self, data_idx, M_i):
    method compute_sig_alpha_W (line 1161) | def compute_sig_alpha_W(self, data_idx, lst_idx_all, W_i):
    method lnpdf (line 1178) | def lnpdf(self, x):
    method lnpdf_grad (line 1196) | def lnpdf_grad(self, x):
    method rvs (line 1238) | def rvs(self, n):
    method __str__ (line 1241) | def __str__(self):
  class HalfT (line 1247) | class HalfT(Prior):
    method __new__ (line 1258) | def __new__(cls, A, nu):  # Singleton:
    method __init__ (line 1268) | def __init__(self, A, nu):
    method __str__ (line 1273) | def __str__(self):
    method lnpdf (line 1276) | def lnpdf(self, theta):
    method lnpdf_grad (line 1293) | def lnpdf_grad(self, theta):
    method rvs (line 1302) | def rvs(self, n):
  class Exponential (line 1311) | class Exponential(Prior):
    method __new__ (line 1322) | def __new__(cls, l):  # Singleton:
    method __init__ (line 1332) | def __init__(self, l):
    method __str__ (line 1335) | def __str__(self):
    method summary (line 1338) | def summary(self):
    method lnpdf (line 1346) | def lnpdf(self, x):
    method lnpdf_grad (line 1349) | def lnpdf_grad(self, x):
    method rvs (line 1352) | def rvs(self, n):
  class StudentT (line 1355) | class StudentT(Prior):
    method __new__ (line 1369) | def __new__(cls, mu=0, sigma=1, nu=4):  # Singleton:
    method __init__ (line 1383) | def __init__(self, mu, sigma, nu):
    method __str__ (line 1389) | def __str__(self):
    method lnpdf (line 1392) | def lnpdf(self, x):
    method lnpdf_grad (line 1396) | def lnpdf_grad(self, x):
    method rvs (line 1399) | def rvs(self, n):

FILE: transopt/utils/Read.py
  function read_file (line 6) | def read_file(file_path)->pd.DataFrame:
  function read_url (line 47) | def read_url(url):

FILE: transopt/utils/Visualization.py
  function visual_contour (line 15) | def visual_contour(
  function visual_oned (line 162) | def visual_oned(
  function visual_pf (line 307) | def visual_pf(

FILE: transopt/utils/check.py
  function check_dir (line 8) | def  check_dir(self):
  function check_url (line 14) | def check_url(url):
  function check_ip_address (line 22) | def check_ip_address(ip_address):

FILE: transopt/utils/encoding.py
  function target_encoding (line 3) | def target_encoding(df:pds.DataFrame, column_name, target_name):
  function multitarget_encoding (line 29) | def multitarget_encoding(df:pds.DataFrame, column_name, target_names):

FILE: transopt/utils/hypervolume.py
  function find_pareto (line 5) | def find_pareto(X, y):
  function find_pareto_only_y (line 41) | def find_pareto_only_y(y):
  function create_cells (line 71) | def create_cells(pf, ref, ref_inv=None):
  function find_pareto_from_posterior (line 142) | def find_pareto_from_posterior(X, mean, y):
  function calc_hypervolume (line 183) | def calc_hypervolume(y, w_ref):

FILE: transopt/utils/log.py
  function get_logger (line 10) | def get_logger(logger_name: str) -> logging.Logger:

FILE: transopt/utils/openml_data_manager.py
  function _check_dir (line 36) | def _check_dir(path: Path):
  function get_openml100_taskids (line 48) | def get_openml100_taskids():
  function get_openmlcc18_taskids (line 69) | def get_openmlcc18_taskids():
  function _load_data (line 86) | def _load_data(task_id: int):
  class DataManager (line 123) | class DataManager(abc.ABC, metaclass=abc.ABCMeta):
    method __init__ (line 132) | def __init__(self):
    method load (line 136) | def load(self):
    method create_save_directory (line 142) | def create_save_directory(self, save_dir: Path):
    method _download_file_with_progressbar (line 156) | def _download_file_with_progressbar(self, data_url: str, data_file: Pa...
    method _untar_data (line 179) | def _untar_data(self, compressed_file: Path, save_dir: Union[Path, Non...
    method _unzip_data (line 189) | def _unzip_data(self, compressed_file: Path, save_dir: Union[Path, Non...
  class HoldoutDataManager (line 197) | class HoldoutDataManager(DataManager):
    method __init__ (line 210) | def __init__(self):
  class CrossvalidationDataManager (line 221) | class CrossvalidationDataManager(DataManager):
    method __init__ (line 233) | def __init__(self):
  class OpenMLHoldoutDataManager (line 242) | class OpenMLHoldoutDataManager(HoldoutDataManager):
    method __init__ (line 262) | def __init__(self, openml_task_id: int, rng: Union[int, np.random.Rand...
    method load (line 276) | def load(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray,
    method replace_nans_in_cat_columns (line 303) | def replace_nans_in_cat_columns(X_train: np.ndarray, X_valid: np.ndarr...
  class OpenMLCrossvalidationDataManager (line 329) | class OpenMLCrossvalidationDataManager(CrossvalidationDataManager):
    method __init__ (line 349) | def __init__(self, openml_task_id: int, rng: Union[int, np.random.Rand...
    method load (line 363) | def load(self):

FILE: transopt/utils/pareto.py
  function convert_minimization (line 10) | def convert_minimization(Y, obj_type=None):
  function find_pareto_front (line 32) | def find_pareto_front(Y, return_index=False, obj_type=None, eps=1e-8):
  function check_pareto (line 54) | def check_pareto(Y, obj_type=None):
  function calc_hypervolume (line 74) | def calc_hypervolume(Y, ref_point, obj_type=None):
  function calc_pred_error (line 88) | def calc_pred_error(Y, Y_pred_mean, average=False):

FILE: transopt/utils/path.py
  function get_library_path (line 5) | def get_library_path():
  function get_absolut_path (line 15) | def get_absolut_path():
  function get_log_file_path (line 26) | def get_log_file_path():

FILE: transopt/utils/plot.py
  function plot2D (line 5) | def plot2D(X, Y, c='black', ls='', marker='o', fillstyle=None, label=Non...
  function plot3D (line 36) | def plot3D(X, Y, Z, c='black', ls='', marker='o', fillstyle=None, label=...
  function surface3D (line 65) | def surface3D(X_grid, Y_grid, cmap=cm.Blues, ax=None, file=None, show=Fa...

FILE: transopt/utils/profile.py
  function profile_function (line 4) | def profile_function(filename=None):

FILE: transopt/utils/rng_helper.py
  function get_rng (line 14) | def get_rng(rng: Union[int, np.random.RandomState, None] = None,
  function _cast_int_to_random_state (line 42) | def _cast_int_to_random_state(rng: Union[int, np.random.RandomState]) ->...
  function serialize_random_state (line 62) | def serialize_random_state(random_state: np.random.RandomState) -> Tuple...
  function deserialize_random_state (line 68) | def deserialize_random_state(random_state: Tuple[int, List, int, int, in...

FILE: transopt/utils/serialization.py
  class InputData (line 8) | class InputData:
  class TaskData (line 13) | class TaskData:
  function output_to_ndarray (line 39) | def output_to_ndarray(Y: List[Dict]) -> np.ndarray:
  function multioutput_to_ndarray (line 49) | def multioutput_to_ndarray(output_value: List[Dict], num_output:int) -> ...
  function convert_np_to_bulidin (line 62) | def convert_np_to_bulidin(obj):

FILE: transopt/utils/sk.py
  function skDemo (line 27) | def skDemo(n=5) :
  class o (line 53) | class o:
    method __init__ (line 54) | def __init__(i,**d) : i.__dict__.update(**d)
  class THE (line 56) | class THE:
  function cliffsDeltaSlow (line 73) | def cliffsDeltaSlow(lst1,lst2, dull = THE.cliffs.dull):
  function cliffsDelta (line 84) | def cliffsDelta(lst1, lst2,  dull=THE.cliffs.dull):
  function bootstrap (line 106) | def bootstrap(y0,z0,conf=THE.bs.conf,b=THE.bs.b):
  function same (line 147) | def same(x): return x
  class Mine (line 149) | class Mine:
    method identify (line 152) | def identify(i):
    method __repr__ (line 156) | def __repr__(i):
  class Rx (line 167) | class Rx(Mine):
    method __init__ (line 169) | def __init__(i, rx="",vals=[], key=same):
    method tiles (line 176) | def tiles(i,lo=0,hi=1): return  xtile(i.vals,lo,hi)
    method __lt__ (line 177) | def __lt__(i,j):        return i.med < j.med
    method __eq__ (line 178) | def __eq__(i,j):
    method __repr__ (line 181) | def __repr__(i):
    method xpect (line 183) | def xpect(i,j,b4):
    method data (line 191) | def data(**d):
    method fileIn (line 196) | def fileIn(f):
    method sum (line 211) | def sum(rxs):
    method show (line 220) | def show(rxs):
    method write (line 228) | def write(rxs):
    method sk (line 238) | def sk(rxs):
  function pairs (line 263) | def pairs(lst):
  function words (line 270) | def words(f):
  function xtile (line 276) | def xtile(lst,lo,hi,
  function thing (line 309) | def thing(x):
  function _cliffsDelta (line 318) | def _cliffsDelta():
  function bsTest (line 327) | def bsTest(n=1000,mu1=10,sigma1=1,mu2=10.2,sigma2=1):

FILE: transopt/utils/weights.py
  function _set_weight (line 5) | def _set_weight(w, c, v, unit, s, n_obj, dim):
  function _no_weight (line 20) | def _no_weight(unit, s, dim):
  function init_weight (line 30) | def init_weight(n_obj, n_sample):
  function tchebycheff (line 48) | def tchebycheff(X, W, ideal=None, normalize=False):

FILE: webui/src/App.js
  function App (line 24) | function App() {

FILE: webui/src/components/CalendarView/index.js
  constant THEME_BG (line 7) | const THEME_BG = CALENDAR_EVENT_STYLE
  function CalendarView (line 9) | function CalendarView({calendarEvents, addNewEvent, openDayDetail}){

FILE: webui/src/components/Cards/TitleCard.js
  function TitleCard (line 4) | function TitleCard({title, children, topMargin, TopSideButtons}){

FILE: webui/src/components/Input/InputText.js
  function InputText (line 4) | function InputText({labelTitle, labelStyle, type, containerStyle, defaul...

FILE: webui/src/components/Input/SearchBar.js
  function SearchBar (line 5) | function SearchBar({searchText, styleClass, placeholderText, setSearchTe...

FILE: webui/src/components/Input/SelectBox.js
  function SelectBox (line 8) | function SelectBox(props){

FILE: webui/src/components/Input/TextAreaInput.js
  function TextAreaInput (line 4) | function TextAreaInput({labelTitle, labelStyle, type, containerStyle, de...

FILE: webui/src/components/Input/ToogleInput.js
  function ToogleInput (line 4) | function ToogleInput({labelTitle, labelStyle, type, containerStyle, defa...

FILE: webui/src/components/Typography/ErrorText.js
  function ErrorText (line 1) | function ErrorText({styleClass, children}){

FILE: webui/src/components/Typography/HelperText.js
  function HelperText (line 1) | function HelperText({className, children}){

FILE: webui/src/components/Typography/Subtitle.js
  function Subtitle (line 1) | function Subtitle({styleClass, children}){

FILE: webui/src/components/Typography/Title.js
  function Title (line 1) | function Title({className, children}){

FILE: webui/src/containers/Header.js
  function Header (line 14) | function Header(){

FILE: webui/src/containers/Layout.js
  function Layout (line 11) | function Layout(){

FILE: webui/src/containers/LeftSidebar.js
  function LeftSidebar (line 7) | function LeftSidebar(){

FILE: webui/src/containers/ModalLayout.js
  function ModalLayout (line 9) | function ModalLayout(){

FILE: webui/src/containers/PageContent.js
  function PageContent (line 12) | function PageContent(){

FILE: webui/src/containers/RightSidebar.js
  function RightSidebar (line 9) | function RightSidebar(){

FILE: webui/src/containers/SidebarSubmenu.js
  function SidebarSubmenu (line 6) | function SidebarSubmenu({submenu, name, icon}){

FILE: webui/src/containers/SuspenseContent.js
  function SuspenseContent (line 1) | function SuspenseContent(){

FILE: webui/src/features/algorithm/components/OptTable.js
  function OptTable (line 47) | function OptTable({ optimizer }) {

FILE: webui/src/features/algorithm/components/SelectPlugin.js
  function SelectAlgorithm (line 12) | function SelectAlgorithm({SpaceRefiner, Sampler, Pretrain, Model, ACF, D...

FILE: webui/src/features/algorithm/index.js
  class Algorithm (line 9) | class Algorithm extends React.Component {
    method constructor (line 10) | constructor(props) {
    method render (line 29) | render() {

FILE: webui/src/features/analytics/charts/Box.js
  function Box (line 9) | function Box({ BoxData }) {

FILE: webui/src/features/analytics/charts/Trajectory.js
  class Trajectory (line 44) | class Trajectory extends Component {
    method constructor (line 45) | constructor(props) {
    method render (line 49) | render() {

FILE: webui/src/features/analytics/components/SelectTask.js
  function ASearch (line 14) | function ASearch({key, name, restField, remove, selections}) {
  function SelectTask (line 125) | function SelectTask({selections, handleClick}) {

FILE: webui/src/features/analytics/index.js
  class Analytics (line 17) | class Analytics extends React.Component {
    method constructor (line 18) | constructor(props) {
    method render (line 66) | render() {

FILE: webui/src/features/calendar/CalendarEventsBodyRightDrawer.js
  constant THEME_BG (line 3) | const THEME_BG = CALENDAR_EVENT_STYLE
  function CalendarEventsBodyRightDrawer (line 5) | function CalendarEventsBodyRightDrawer({filteredEvents}){

FILE: webui/src/features/calendar/index.js
  constant INITIAL_EVENTS (line 12) | const INITIAL_EVENTS = CALENDAR_INITIAL_EVENTS
  function Calendar (line 14) | function Calendar(){

FILE: webui/src/features/charts/components/BarChart.js
  function BarChart (line 15) | function BarChart(){

FILE: webui/src/features/charts/components/DoughnutChart.js
  function DoughnutChart (line 18) | function DoughnutChart(){

FILE: webui/src/features/charts/components/LineChart.js
  function LineChart (line 26) | function LineChart(){

FILE: webui/src/features/charts/components/PieChart.js
  function PieChart (line 18) | function PieChart(){

FILE: webui/src/features/charts/components/ScatterChart.js
  function ScatterChart (line 16) | function ScatterChart(){

FILE: webui/src/features/charts/components/StackBarChart.js
  function StackBarChart (line 15) | function StackBarChart(){

FILE: webui/src/features/charts/index.js
  function Charts (line 13) | function Charts(){

FILE: webui/src/features/chatbot/ChatBot.js
  class ChatBot (line 7) | class ChatBot extends React.Component {
    method render (line 9) | render() {

FILE: webui/src/features/chatbot/components/ChatUI.js
  function ChatUI (line 6) | function ChatUI() {

FILE: webui/src/features/common/components/ConfirmationModalBody.js
  function ConfirmationModalBody (line 7) | function ConfirmationModalBody({ extraObject, closeModal}){

FILE: webui/src/features/common/components/NotificationBodyRightDrawer.js
  function NotificationBodyRightDrawer (line 1) | function NotificationBodyRightDrawer(){

FILE: webui/src/features/dashboard/components/AmountStats.js
  function AmountStats (line 3) | function AmountStats({}){

FILE: webui/src/features/dashboard/components/BarChart.js
  function BarChart (line 15) | function BarChart({ ImportanceData }){

FILE: webui/src/features/dashboard/components/DashboardStats.js
  function DashboardStats (line 1) | function DashboardStats({title, icon, value, description, colorIndex}){

FILE: webui/src/features/dashboard/components/DashboardTopBar.js
  function DashboardTopBar (line 21) | function DashboardTopBar({updateDashboardPeriod}){

FILE: webui/src/features/dashboard/components/DoughnutChart.js
  function DoughnutChart (line 18) | function DoughnutChart(){

FILE: webui/src/features/dashboard/components/Footprint.js
  function Footprint (line 16) | function Footprint({ ScatterData = {} }) { // 提供默认值为空对象

FILE: webui/src/features/dashboard/components/Importance.js
  function Importance (line 3) | function Importance() {

FILE: webui/src/features/dashboard/components/PageStats.js
  function PageStats (line 5) | function PageStats({}){

FILE: webui/src/features/dashboard/components/ScatterChart.js
  function Footprint (line 38) | function Footprint({ ScatterData = {} }) {

FILE: webui/src/features/dashboard/components/UserChannels.js
  function UserChannels (line 11) | function UserChannels(){

FILE: webui/src/features/dashboard/index.js
  class Dashboard (line 15) | class Dashboard extends React.Component {
    method constructor (line 16) | constructor(props) {
    method componentDidMount (line 109) | componentDidMount() {
    method componentWillUnmount (line 114) | componentWillUnmount() {
    method render (line 149) | render() {

FILE: webui/src/features/documentation/DocComponents.js
  function DocComponents (line 13) | function DocComponents(){

FILE: webui/src/features/documentation/DocFeatures.js
  function Features (line 13) | function Features(){

FILE: webui/src/features/documentation/DocGettingStarted.js
  function GettingStarted (line 11) | function GettingStarted(){

FILE: webui/src/features/documentation/components/DocComponentsContent.js
  function DocComponentsContent (line 12) | function DocComponentsContent(){

FILE: webui/src/features/documentation/components/DocComponentsNav.js
  function DocComponentsNav (line 3) | function DocComponentsNav({activeIndex}){

FILE: webui/src/features/documentation/components/FeaturesContent.js
  function FeaturesContent (line 6) | function FeaturesContent(){

FILE: webui/src/features/documentation/components/FeaturesNav.js
  function FeaturesNav (line 3) | function FeaturesNav({activeIndex}){

FILE: webui/src/features/documentation/components/GettingStartedContent.js
  function GettingStartedContent (line 6) | function GettingStartedContent(){

FILE: webui/src/features/documentation/components/GettingStartedNav.js
  function GettingStartedNav (line 3) | function GettingStartedNav({activeIndex}){

FILE: webui/src/features/experiment/components/DashboardStats.js
  function DashboardStats (line 1) | function DashboardStats({title, icon, value, description, colorIndex}){

FILE: webui/src/features/experiment/components/SearchData.js
  function SearchData (line 18) | function SearchData({set_dataset}) {

FILE: webui/src/features/experiment/components/SelectAlgorithm.js
  function SelectAlgorithm (line 13) | function SelectAlgorithm({ SpaceRefiner, Sampler, Pretrain, Model, ACF, ...

FILE: webui/src/features/experiment/components/SelectData.js
  function SelectData (line 14) | function SelectData({DatasetData, updateTable, DatasetSelector}) {
  function Info (line 179) | function Info({isExact, data}) {

FILE: webui/src/features/experiment/components/SelectTask.js
  function TaskTable (line 8) | function TaskTable({ tasks }) {
  function SelectTask (line 32) | function SelectTask({ data, updateTable }) {

FILE: webui/src/features/experiment/index.js
  class Experiment (line 11) | class Experiment extends React.Component {
    method constructor (line 12) | constructor(props) {
    method render (line 76) | render() {

FILE: webui/src/features/integration/index.js
  constant INITIAL_INTEGRATION_LIST (line 7) | const INITIAL_INTEGRATION_LIST = [
  function Integration (line 17) | function Integration(){

FILE: webui/src/features/leads/components/AddLeadModalBody.js
  constant INITIAL_LEAD_OBJ (line 8) | const INITIAL_LEAD_OBJ = {
  function AddLeadModalBody (line 14) | function AddLeadModalBody({closeModal}){

FILE: webui/src/features/leads/index.js
  function Leads (line 26) | function Leads(){

FILE: webui/src/features/run/components/DataTable.js
  function DataTable (line 6) | function DataTable({datasets, optimizer}) {

FILE: webui/src/features/run/components/OptTable.js
  function OptTable (line 6) | function OptTable({optimizer}) {

FILE: webui/src/features/run/components/Run.js
  function Run (line 13) | function Run() {

FILE: webui/src/features/run/components/RunProgress.js
  class RunProgress (line 9) | class RunProgress extends React.Component {
    method constructor (line 10) | constructor(props) {
    method componentDidMount (line 21) | componentDidMount() {
    method componentWillUnmount (line 26) | componentWillUnmount() {
    method render (line 83) | render() {

FILE: webui/src/features/run/components/TaskTable.js
  function TaskTable (line 6) | function TaskTable({tasks}) {

FILE: webui/src/features/run/index.js
  class RunPage (line 14) | class RunPage extends React.Component {
    method constructor (line 15) | constructor(props) {
    method render (line 25) | render() {

FILE: webui/src/features/seldata/components/DataTable.js
  function DataTable (line 6) | function DataTable({ SpaceRefiner, SpaceRefinerDataSelector, SpaceRefine...

FILE: webui/src/features/seldata/components/SearchData.js
  function SearchData (line 18) | function SearchData({set_dataset}) {

FILE: webui/src/features/seldata/components/SelectData.js
  function SelectData (line 14) | function SelectData({DatasetData, updateTable, DatasetSelector}) {
  function Info (line 179) | function Info({isExact, data}) {

FILE: webui/src/features/seldata/index.js
  class Dataselector (line 14) | class Dataselector extends React.Component {
    method constructor (line 15) | constructor(props) {
    method render (line 65) | render() {

FILE: webui/src/features/settings/billing/index.js
  constant BILLS (line 9) | const BILLS = [
  function Billing (line 29) | function Billing(){

FILE: webui/src/features/settings/profilesettings/index.js
  function ProfileSettings (line 10) | function ProfileSettings(){

FILE: webui/src/features/settings/team/index.js
  constant TEAM_MEMBERS (line 23) | const TEAM_MEMBERS = [
  function Team (line 33) | function Team(){

FILE: webui/src/features/transactions/index.js
  function Transactions (line 57) | function Transactions(){

FILE: webui/src/features/user/ForgotPassword.js
  function ForgotPassword (line 8) | function ForgotPassword(){

FILE: webui/src/features/user/LandingIntro.js
  function LandingIntro (line 5) | function LandingIntro(){

FILE: webui/src/features/user/Login.js
  function Login (line 7) | function Login(){

FILE: webui/src/features/user/Register.js
  function Register (line 7) | function Register(){

FILE: webui/src/features/user/components/TemplatePointers.js
  function TemplatePointers (line 1) | function TemplatePointers(){

FILE: webui/src/pages/GettingStarted.js
  function ExternalPage (line 5) | function ExternalPage(){

FILE: webui/src/pages/protected/404.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/Algorithm.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/Analytics.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/Bills.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/Blank.js
  function InternalPage (line 7) | function InternalPage(){

FILE: webui/src/pages/protected/Calendar.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/Charts.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/ChatOpt.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/Dashboard.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/Experiment.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/Integration.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/Leads.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/ProfileSettings.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/Run.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/Seldata.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/Team.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/Transactions.js
  function InternalPage (line 6) | function InternalPage(){

FILE: webui/src/pages/protected/Welcome.js
  function InternalPage (line 7) | function InternalPage(){
Condensed preview — 419 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,990K chars).
[
  {
    "path": ".gitignore",
    "chars": 1278,
    "preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packagi"
  },
  {
    "path": "LICENSE",
    "chars": 1516,
    "preview": "BSD 3-Clause License\n\nCopyright (c) 2023, peilimao\nAll rights reserved.\n\nRedistribution and use in source and binary for"
  },
  {
    "path": "MANIFEST.in",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "README.md",
    "chars": 5867,
    "preview": "<p align=\"center\">\n  <a href=\"https://maopl.github.io/TransOpt-doc/\">\n    <img src=\"./docs/source/_static/figures/transo"
  },
  {
    "path": "demo/analysis.py",
    "chars": 1703,
    "preview": "import logging\nimport os\nimport argparse\n\nfrom pathlib import Path\nfrom transopt.ResultAnalysis.AnalysisPipeline import "
  },
  {
    "path": "demo/causal_analysis.py",
    "chars": 1813,
    "preview": "import logging\nimport os\nimport argparse\n\nfrom pathlib import Path\nfrom transopt.ResultAnalysis.AnalysisPipeline import "
  },
  {
    "path": "demo/comparison/analysis_hypervolume.py",
    "chars": 12849,
    "preview": "import sys\nfrom pathlib import Path\n\ncurrent_path = Path(__file__).resolve().parent\npackage_path = current_path.parent.p"
  },
  {
    "path": "demo/comparison/analysis_plot.py",
    "chars": 16354,
    "preview": "import sys\nfrom pathlib import Path\n\ncurrent_path = Path(__file__).resolve().parent\npackage_path = current_path.parent.p"
  },
  {
    "path": "demo/comparison/experiment_gcc.py",
    "chars": 4800,
    "preview": "import sys\nfrom pathlib import Path\n\ncurrent_dir = Path(__file__).resolve().parent\npackage_dir = current_dir.parent.pare"
  },
  {
    "path": "demo/comparison/experiment_llvm.py",
    "chars": 4332,
    "preview": "import sys\nfrom pathlib import Path\n\ncurrent_dir = Path(__file__).resolve().parent\npackage_dir = current_dir.parent.pare"
  },
  {
    "path": "demo/comparison/features_by_workload_gcc.json",
    "chars": 17283,
    "preview": "{\n    \"cbench-consumer-tiff2bw\": {\n        \"common\": [\n            \"align-jumps\",\n            \"align-labels\",\n          "
  },
  {
    "path": "demo/comparison/features_by_workload_gcc_extra.json",
    "chars": 12062,
    "preview": "{\n    \"cbench-automotive-bitcount\": {\n        \"common\": [\n            \"align-labels\",\n            \"tree-ter\",\n          "
  },
  {
    "path": "demo/comparison/features_by_workload_llvm.json",
    "chars": 10657,
    "preview": "{\n    \"cbench-telecom-gsm\": {\n        \"common\": [\n            \"early-cse\",\n            \"gvn\",\n            \"instcombine\","
  },
  {
    "path": "demo/comparison/plot.py",
    "chars": 11292,
    "preview": "import json\nimport sys\nfrom pathlib import Path\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy a"
  },
  {
    "path": "demo/comparison/plot_samples_dbms.py",
    "chars": 3390,
    "preview": "import sys\nfrom pathlib import Path\n\ncurrent_path = Path(__file__).resolve().parent\npackage_path = current_path.parent.p"
  },
  {
    "path": "demo/comparison/start_server.py",
    "chars": 1261,
    "preview": "import os\nimport sys\nfrom pathlib import Path\n\n# Define the current and package paths\ncurrent_path = Path(__file__).reso"
  },
  {
    "path": "demo/correlation_analysis.py",
    "chars": 1619,
    "preview": "import logging\nimport os\nimport argparse\n\nfrom pathlib import Path\nfrom csstuning.compiler.compiler_benchmark import Com"
  },
  {
    "path": "demo/experiment_lsh_validity.py",
    "chars": 7093,
    "preview": "import random\nimport string\nimport time\nimport uuid\nimport pandas as pd\n\nfrom transopt.datamanager.manager import DataMa"
  },
  {
    "path": "demo/experiments.py",
    "chars": 2584,
    "preview": "import logging\nimport os\nimport argparse\nimport sys\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\npackage_di"
  },
  {
    "path": "demo/importances/cal_relationship.py",
    "chars": 7674,
    "preview": "import sys\nfrom pathlib import Path\n\ncurrent_path = Path(__file__).resolve().parent\npackage_path = current_path.parent.p"
  },
  {
    "path": "demo/importances/draw_obj_heatmap.py",
    "chars": 4357,
    "preview": "import pandas as pd\nimport numpy as np\nimport matplotlib.colors as mcolors\nimport matplotlib.pyplot as plt\nimport matplo"
  },
  {
    "path": "demo/importances/get_feature_importances.py",
    "chars": 14086,
    "preview": "import sys\nfrom pathlib import Path\n\ncurrent_dir = Path(__file__).resolve().parent\npackage_dir = current_dir.parent.pare"
  },
  {
    "path": "demo/jacard_exec_times.csv",
    "chars": 3371,
    "preview": "1000,2000,3000,4000,5000,6000,7000,8000,10000\n0.9119875431060791,2.082753896713257,3.91093111038208,8.557840585708618,12"
  },
  {
    "path": "demo/lsh_exec_times.csv",
    "chars": 3534,
    "preview": "1000,2000,3000,4000,5000,6000,7000,8000,10000\n0.023784637451171875,0.08341550827026367,0.15126347541809082,0.33096837997"
  },
  {
    "path": "demo/random_sample_compiler.py",
    "chars": 3600,
    "preview": "import os\nimport sys\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\npackage_dir = os.path.dirname(current_dir"
  },
  {
    "path": "demo/random_sample_dbms.py",
    "chars": 2393,
    "preview": "current_dir = os.path.dirname(os.path.abspath(__file__))\npackage_dir = os.path.dirname(current_dir)\nsys.path.insert(0, p"
  },
  {
    "path": "demo/sampling/random_sample_compiler.py",
    "chars": 2897,
    "preview": "import os\nimport sys\nfrom pathlib import Path\n\ncurrent_dir = Path(__file__).resolve().parent\npackage_dir = current_dir.p"
  },
  {
    "path": "demo/sampling/random_sample_dbms.py",
    "chars": 2432,
    "preview": "import os\nimport sys\nfrom pathlib import Path\n\ncurrent_dir = Path(__file__).resolve().parent\npackage_dir = current_dir.p"
  },
  {
    "path": "docs/Makefile",
    "chars": 638,
    "preview": "# Minimal makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line, and also\n# from the "
  },
  {
    "path": "docs/make.bat",
    "chars": 804,
    "preview": "@ECHO OFF\r\n\r\npushd %~dp0\r\n\r\nREM Command file for Sphinx documentation\r\n\r\nif \"%SPHINXBUILD%\" == \"\" (\r\n\tset SPHINXBUILD=sp"
  },
  {
    "path": "docs/source/_static/custom.css",
    "chars": 544,
    "preview": ".bd-sidebar-secondary {\n    display: none !important;\n}\n\n/* 让 bd-article 占据 100% 宽度 */\n.bd-main .bd-content {\n    flex-g"
  },
  {
    "path": "docs/source/conf.py",
    "chars": 1675,
    "preview": "# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see t"
  },
  {
    "path": "docs/source/development/api_reference.rst",
    "chars": 195,
    "preview": "API Reference\n=============\n\nThis section provides a detailed reference for the TransOPT API, including descriptions of "
  },
  {
    "path": "docs/source/development/architecture.rst",
    "chars": 2179,
    "preview": "Architecture Overview\n======================\n\nThis section provides an overview of the architecture of the TransOPT soft"
  },
  {
    "path": "docs/source/faq.rst",
    "chars": 592,
    "preview": "FAQ\n================================\n\nThis section addresses common questions and issues that users might encounter when"
  },
  {
    "path": "docs/source/home/feature.html",
    "chars": 6240,
    "preview": "<link rel=\"stylesheet\" href=\"https://use.fontawesome.com/releases/v5.8.1/css/all.css\"\n      integrity=\"sha384-50oBUHEmvp"
  },
  {
    "path": "docs/source/home/guide.html",
    "chars": 2259,
    "preview": "<style>\n\n    .zoom:hover {\n        transform: scale(1.07);\n    }\n</style>\n\n\n<div class=\"container\">\n    <div class=\"row "
  },
  {
    "path": "docs/source/home/portfolio.html",
    "chars": 16248,
    "preview": "<link rel=\"stylesheet\" href=\"https://use.fontawesome.com/releases/v5.8.1/css/all.css\"\n      integrity=\"sha384-50oBUHEmvp"
  },
  {
    "path": "docs/source/index.rst",
    "chars": 2494,
    "preview": ".. TransOPT documentation master file, created by\n   sphinx-quickstart on Mon Aug 19 16:00:09 2024.\n   You can adapt thi"
  },
  {
    "path": "docs/source/installation.rst",
    "chars": 1562,
    "preview": "Installation Guide\n==================\n\nThis section will guide you through the steps required to install TransOPT on you"
  },
  {
    "path": "docs/source/quickstart.rst",
    "chars": 1482,
    "preview": "Quick Start\n======================\n\nTransOPT is a sophisticated system designed to facilitate transfer optimization serv"
  },
  {
    "path": "docs/source/usage/TOS.bib",
    "chars": 96116,
    "preview": "%!BibTeX\n\n@article{QureshiIGKWUHLYA23,\n    author       = {Rizwan Qureshi and\n                    Muhammad Irfan and\n   "
  },
  {
    "path": "docs/source/usage/algorithms.rst",
    "chars": 13595,
    "preview": "Algorithmic objects\n===================\n\n.. admonition:: Overview\n   :class: info\n   \n   - :ref:`Register <register-new-"
  },
  {
    "path": "docs/source/usage/cli.rst",
    "chars": 4964,
    "preview": ".. _command_line_usage:\n\nCommand Line\n===============================\n\nTransOPT provides a command-line interface (CLI) "
  },
  {
    "path": "docs/source/usage/data_manage.rst",
    "chars": 22670,
    "preview": "Data Management\n===============\n\nThe `datamanager` module is designed to manage data generated during optimization tasks"
  },
  {
    "path": "docs/source/usage/problems.rst",
    "chars": 52857,
    "preview": "Benchmark Problems\n==================\nThis\n\n.. admonition:: Overview\n   :class: info\n\n   - :ref:`Register <registering-n"
  },
  {
    "path": "docs/source/usage/results.rst",
    "chars": 14514,
    "preview": "Results Analysis\n================\n\n\n.. admonition:: Overview\n   :class: info\n\n   - :ref:`Register a New Results Analysis"
  },
  {
    "path": "docs/source/usage/visualization.rst",
    "chars": 4994,
    "preview": "Visualization\n===============\n\nThis section demonstrates various visualization techniques used in TransOPT.\n\nData Filter"
  },
  {
    "path": "extra_requirements/analysis.json",
    "chars": 80,
    "preview": "{\n    \"analysis\": [\"pandas\", \"tikzplotlib\", \"pdf2image\", \"seaborn\", \"Pillow\"] \n}"
  },
  {
    "path": "extra_requirements/remote.json",
    "chars": 49,
    "preview": "{\n    \"remote\": [\"flask\", \"requests\", \"celery\"]\n}"
  },
  {
    "path": "requirements.txt",
    "chars": 461,
    "preview": "scipy>=1.4.1\nnumpy>=1.18.1\nConfigSpace>=0.4.12\nscikit-learn\nopenml\nmatplotlib\ntorch\ntorchvision\ngpytorch\nGPyOpt\ngym\nsobo"
  },
  {
    "path": "resources/docker/absolut_image/Dockerfile",
    "chars": 582,
    "preview": "FROM ubuntu:latest\n\nRUN apt-get update && \\\n    apt-get install -y git wget unzip build-essential\n\nENV INSTALL_DIR=/usr/"
  },
  {
    "path": "resources/docker/absolut_image/prepare_antigen.sh",
    "chars": 1024,
    "preview": "#!/bin/bash\n\n# 检查是否提供了 antigen 参数\nif [ -z \"$1\" ]; then\n    echo \"Usage: $0 <antigen>\"\n    exit 1\nfi\n\nANTIGEN=$1\nINSTALL_"
  },
  {
    "path": "scripts/init_csstuning.sh",
    "chars": 221,
    "preview": "#!/bin/bash\npip install transopt_external/csstuning\n\nbash transopt_external/csstuning/cssbench/compiler/docker/build_doc"
  },
  {
    "path": "scripts/init_docker.sh",
    "chars": 1131,
    "preview": "#!/bin/bash\n\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" &>/dev/null && pwd)\"\nDOCKER_ROOT_DIR=\"$SCRIPT_DIR/../reso"
  },
  {
    "path": "setup.py",
    "chars": 2753,
    "preview": "import os\nimport json\nfrom setuptools import setup, find_packages\nimport subprocess\n\ndef get_extra_requirements(folder='"
  },
  {
    "path": "tests/EXP_NSGA2.py",
    "chars": 1902,
    "preview": "import numpy as np\nfrom pymoo.algorithms.moo.nsga2 import NSGA2\nfrom pymoo.optimize import minimize\nfrom pymoo.core.prob"
  },
  {
    "path": "tests/EXP_NSGA2_restart.py",
    "chars": 6700,
    "preview": "import numpy as np\nfrom pymoo.algorithms.moo.nsga2 import NSGA2\nfrom pymoo.optimize import minimize\nfrom pymoo.core.prob"
  },
  {
    "path": "tests/EXP_bohb.py",
    "chars": 1267,
    "preview": "from bohb import BOHB\nimport bohb.configspace as cs\nfrom transopt.benchmark.HPO.HPO import HPO_ERM\nimport numpy as np\n\n#"
  },
  {
    "path": "tests/EXP_grid.py",
    "chars": 1953,
    "preview": "import numpy as np\nfrom transopt.benchmark.HPO.HPO import HPO_ERM\nfrom scipy.stats import qmc\n\ndef sobol_search(n_sample"
  },
  {
    "path": "tests/EXP_hebo.py",
    "chars": 1332,
    "preview": "import numpy as np\nfrom hebo.design_space.design_space import DesignSpace\nfrom hebo.optimizers.hebo import HEBO\nfrom tra"
  },
  {
    "path": "tests/EXP_hyperopt.py",
    "chars": 1611,
    "preview": "from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\nfrom transopt.benchmark.HPO.HPO import HPO_ERM\nimport numpy as np\n"
  },
  {
    "path": "tests/EXP_random.py",
    "chars": 2124,
    "preview": "import numpy as np\nfrom transopt.benchmark.HPO.HPO import HPO_ERM\nimport random\n\ndef random_search(n_trials, task_name, "
  },
  {
    "path": "tests/EXP_smac.py",
    "chars": 1665,
    "preview": "from ConfigSpace import ConfigurationSpace\nimport ConfigSpace as cs\nimport numpy as np\nimport time\nfrom smac import Hype"
  },
  {
    "path": "tests/EXP_tpe.py",
    "chars": 1538,
    "preview": "import ConfigSpace as cs\nimport time\nimport numpy as np\nfrom typing import Any, Dict, List, Optional, Protocol, Tuple\n\nf"
  },
  {
    "path": "tests/data_analysis.py",
    "chars": 8811,
    "preview": "import os\nimport json\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pymoo.util.nds.non_dominated_sor"
  },
  {
    "path": "transopt/ResultAnalysis/AnalysisBase.py",
    "chars": 7783,
    "preview": "import abc\nimport json\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom typing import Dict, Ha"
  },
  {
    "path": "transopt/ResultAnalysis/AnalysisPipeline.py",
    "chars": 904,
    "preview": "from transopt.ResultAnalysis.PlotAnalysis import plot_registry\nfrom transopt.ResultAnalysis.TableAnalysis import table_r"
  },
  {
    "path": "transopt/ResultAnalysis/AnalysisReport.py",
    "chars": 11619,
    "preview": "import os\nfrom pdf2image import convert_from_path\nfrom transopt.ResultAnalysis.ReportNote import Notes\n\n\ndef pdf_to_png("
  },
  {
    "path": "transopt/ResultAnalysis/CasualAnalysis.py",
    "chars": 480,
    "preview": "\nfrom transopt.ResultAnalysis.PlotAnalysis import plot_registry\nfrom transopt.ResultAnalysis.TableAnalysis import table_"
  },
  {
    "path": "transopt/ResultAnalysis/CompileTex.py",
    "chars": 1315,
    "preview": "import os\nimport subprocess\nimport shutil\n\n\ndef compile_tex(tex_path, output_folder):\n    # 保存当前工作目录\n    original_cwd = "
  },
  {
    "path": "transopt/ResultAnalysis/CorrelationAnalysis.py",
    "chars": 1071,
    "preview": "\n\nimport numpy as np\nimport dcor\nfrom sklearn.metrics import mutual_info_score\nfrom transopt.ResultAnalysis.AnalysisBase"
  },
  {
    "path": "transopt/ResultAnalysis/MakeGif.py",
    "chars": 2172,
    "preview": "import os\nfrom PIL import Image\n\ndef make_gif(folder_path):\n    # 获取文件夹中的所有图片文件\n    image_files = [file for file in os.l"
  },
  {
    "path": "transopt/ResultAnalysis/PFAnalysis.py",
    "chars": 595,
    "preview": "import numpy as np\nfrom sklearn.metrics import mutual_info_score\n\nfrom transopt.ResultAnalysis.AnalysisBase import Analy"
  },
  {
    "path": "transopt/ResultAnalysis/PlotAnalysis.py",
    "chars": 25836,
    "preview": "import numpy as np\nfrom collections import Counter, defaultdict\nfrom transopt.ResultAnalysis.AnalysisBase import Analysi"
  },
  {
    "path": "transopt/ResultAnalysis/ReportNote.py",
    "chars": 923,
    "preview": "# There are some explanation about figures and tables\r\nNotes = {\r\n    'box': 'The box plot compares the performance of d"
  },
  {
    "path": "transopt/ResultAnalysis/TableAnalysis.py",
    "chars": 5863,
    "preview": "import numpy as np\nfrom collections import defaultdict\nfrom transopt.utils.sk import Rx\nimport scipy\nfrom transopt.Resul"
  },
  {
    "path": "transopt/ResultAnalysis/TableToLatex.py",
    "chars": 4642,
    "preview": "import numpy as np\nfrom typing import Union, Dict\n\n\ndef matrix_to_latex(Data: Dict, col_names, row_names, caption, oder="
  },
  {
    "path": "transopt/ResultAnalysis/TrackOptimization.py",
    "chars": 426,
    "preview": "import numpy as np\nfrom collections import Counter, defaultdict\nfrom transopt.ResultAnalysis.AnalysisBase import Analysi"
  },
  {
    "path": "transopt/ResultAnalysis/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "transopt/__init__.py",
    "chars": 3,
    "preview": "\n\n\n"
  },
  {
    "path": "transopt/agent/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "transopt/agent/app.py",
    "chars": 10035,
    "preview": "import json\nimport os\nfrom multiprocessing import Process, Manager\n\nfrom flask import Flask, jsonify, request\nfrom flask"
  },
  {
    "path": "transopt/agent/chat/openai_chat.py",
    "chars": 22991,
    "preview": "import json\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Union\n\ni"
  },
  {
    "path": "transopt/agent/chat/prompt",
    "chars": 1278,
    "preview": "\n\nYou are an agent of the \"Transfer Optimization System,\" designed to solve optimization problems. The system can solve "
  },
  {
    "path": "transopt/agent/chat/prompt.bak",
    "chars": 2627,
    "preview": "Please transform my optimization group description into a JSON format according to the following template. Ensure the de"
  },
  {
    "path": "transopt/agent/chat/yaml_generator.py",
    "chars": 1913,
    "preview": "from pathlib import Path\nfrom typing import Any, Dict\n\nimport yaml\nfrom transopt.utils.log import logger\nfrom agent.chat"
  },
  {
    "path": "transopt/agent/config.py",
    "chars": 1199,
    "preview": "\nclass Config:\n    DEBUG = True\n    OPENAI_API_KEY = \"sk-1XGNThXZQVYh6EI25b44Bb74940d4eEdBdDa81723e00C794\"\n    OPENAI_UR"
  },
  {
    "path": "transopt/agent/registry.py",
    "chars": 1073,
    "preview": "class Registry:\n    def __init__(self):\n        self._registry = {}\n\n    def register(self, name=None, cls=None, **kwarg"
  },
  {
    "path": "transopt/agent/run_cli.py",
    "chars": 5287,
    "preview": "import os\nimport traceback\nimport argparse\nfrom services import Services\n\nos.environ[\"MKL_NUM_THREADS\"] = \"1\"\nos.environ"
  },
  {
    "path": "transopt/agent/services.py",
    "chars": 26191,
    "preview": "import os\nimport signal\nimport time\nfrom multiprocessing import Manager, Process\n\nimport numpy as np\n\nfrom transopt.agen"
  },
  {
    "path": "transopt/agent/testood.py",
    "chars": 6948,
    "preview": "import logging\nimport time\nfrom typing import Dict, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport "
  },
  {
    "path": "transopt/analysis/compile_tex.py",
    "chars": 1315,
    "preview": "import os\nimport subprocess\nimport shutil\n\n\ndef compile_tex(tex_path, output_folder):\n    # 保存当前工作目录\n    original_cwd = "
  },
  {
    "path": "transopt/analysis/effect_size.py",
    "chars": 1130,
    "preview": "import os\nimport json\nimport numpy as np\nfrom transopt.utils.sk import Rx\nfrom matplotlib import pyplot as plt\n\n\nplot_di"
  },
  {
    "path": "transopt/analysis/mds.py",
    "chars": 4351,
    "preview": "import numpy as np\nfrom sklearn.manifold import MDS\nfrom scipy.spatial.distance import pdist, squareform\nimport matplotl"
  },
  {
    "path": "transopt/analysis/parameter_network.py",
    "chars": 3544,
    "preview": "import os\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom itertools im"
  },
  {
    "path": "transopt/analysis/table.py",
    "chars": 4308,
    "preview": "import numpy as np\nfrom collections import defaultdict\nfrom transopt.utils.sk import Rx\nimport scipy\nimport os\nfrom mult"
  },
  {
    "path": "transopt/analysis/table_to_latex.py",
    "chars": 4614,
    "preview": "import numpy as np\nfrom typing import Union, Dict\n\n\ndef matrix_to_latex(Data: Dict, col_names, row_names, caption, oder="
  },
  {
    "path": "transopt/benchmark/CPD/__init__.py",
    "chars": 105,
    "preview": "from transopt.benchmark.CPD.PCM.pcm import PCM\nfrom transopt.benchmark.CPD.Absolut.absolut import Absolut"
  },
  {
    "path": "transopt/benchmark/CSSTuning/Compiler.py",
    "chars": 4843,
    "preview": "import numpy as np\nfrom csstuning.compiler.compiler_benchmark import GCCBenchmark, LLVMBenchmark\n\nfrom transopt.agent.re"
  },
  {
    "path": "transopt/benchmark/CSSTuning/DBMS.py",
    "chars": 2375,
    "preview": "import numpy as np\nfrom csstuning.dbms.dbms_benchmark import MySQLBenchmark\n\nfrom transopt.agent.registry import problem"
  },
  {
    "path": "transopt/benchmark/CSSTuning/__init__.py",
    "chars": 117,
    "preview": "from transopt.benchmark.CSSTuning.Compiler import GCCTuning\nfrom transopt.benchmark.CSSTuning.DBMS import MySQLTuning"
  },
  {
    "path": "transopt/benchmark/DownloadBench/references",
    "chars": 77,
    "preview": "https://github.com/automl/HPOBench\n\nhttps://github.com/releaunifreiburg/HPO-B"
  },
  {
    "path": "transopt/benchmark/HBOROB/algorithms.py",
    "chars": 4952,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.autograd as autograd\n\nimport copy\nimport"
  },
  {
    "path": "transopt/benchmark/HBOROB/hporobust.py",
    "chars": 958,
    "preview": "# Install robustbench if you haven't already\n# !pip install robustbench\n\nfrom robustbench.utils import load_model\nfrom r"
  },
  {
    "path": "transopt/benchmark/HBOROB/test.py",
    "chars": 1530,
    "preview": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom robustbench.data import load_cifar10, load_cifar"
  },
  {
    "path": "transopt/benchmark/HPO/HPO.py",
    "chars": 19104,
    "preview": "import collections\nimport os\nimport random\nimport time\nimport json\nfrom typing import Dict, Union\nfrom tqdm import tqdm\n"
  },
  {
    "path": "transopt/benchmark/HPO/HPOAdaBoost.py",
    "chars": 17788,
    "preview": "import os\nimport time\nimport logging\nimport torch\nimport numpy as np\nimport xgboost as xgb\nfrom typing import Union, Tup"
  },
  {
    "path": "transopt/benchmark/HPO/HPOSVM.py",
    "chars": 14718,
    "preview": "import logging\nimport time\nimport numpy as np\nfrom scipy import sparse\nfrom typing import Union, Tuple, Dict, List\nfrom "
  },
  {
    "path": "transopt/benchmark/HPO/HPOXGBoost.py",
    "chars": 17783,
    "preview": "import os\nimport time\nimport logging\nimport torch\nimport numpy as np\nimport xgboost as xgb\nfrom typing import Union, Tup"
  },
  {
    "path": "transopt/benchmark/HPO/__init__.py",
    "chars": 179,
    "preview": "from transopt.benchmark.HPO.HPOSVM import SupportVectorMachine\nfrom transopt.benchmark.HPO.HPOXGBoost import XGBoostBenc"
  },
  {
    "path": "transopt/benchmark/HPO/algorithms.py",
    "chars": 9214,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport copy\nfrom collections import OrderedDict\n"
  },
  {
    "path": "transopt/benchmark/HPO/augmentation.py",
    "chars": 14784,
    "preview": "import torch\nimport numpy as np\nimport random\nfrom transopt.benchmark.HPO.image_options import *\n\n\ndef mixup_data(x, y, "
  },
  {
    "path": "transopt/benchmark/HPO/datasets.py",
    "chars": 18818,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport os\nimport numpy as np\nimport torch\nfrom PI"
  },
  {
    "path": "transopt/benchmark/HPO/fast_data_loader.py",
    "chars": 1874,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport torch\n\nclass _InfiniteSampler(torch.utils"
  },
  {
    "path": "transopt/benchmark/HPO/hparams_registry.py",
    "chars": 4519,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport numpy as np\n\n\ndef get_hparams(algorithm, d"
  },
  {
    "path": "transopt/benchmark/HPO/image_options.py",
    "chars": 2843,
    "preview": "from PIL import Image, ImageEnhance, ImageOps\nimport random\n\n\nclass ShearX(object):\n    def __init__(self, fillcolor=(12"
  },
  {
    "path": "transopt/benchmark/HPO/misc.py",
    "chars": 3298,
    "preview": "import math\nimport hashlib\nimport sys\nfrom collections import OrderedDict\nfrom numbers import Number\nimport operator\n\nim"
  },
  {
    "path": "transopt/benchmark/HPO/networks.py",
    "chars": 13534,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport copy\n\nimport numpy as np\nimport torch\nimp"
  },
  {
    "path": "transopt/benchmark/HPO/test_model.py",
    "chars": 2265,
    "preview": "import os\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom torchvision.trans"
  },
  {
    "path": "transopt/benchmark/HPO/visualization.py",
    "chars": 2799,
    "preview": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\nfrom torchvision import datasets, t"
  },
  {
    "path": "transopt/benchmark/HPO/wide_resnet.py",
    "chars": 3242,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n\"\"\"\nFrom https://github.com/meliketoy/wide-resne"
  },
  {
    "path": "transopt/benchmark/HPOB/HpobBench.py",
    "chars": 4678,
    "preview": "import copy\n\nimport numpy as np\nimport json\n\nimport os\nimport matplotlib.pyplot as plt\nos.environ['OMP_NUM_THREADS'] = \""
  },
  {
    "path": "transopt/benchmark/HPOB/plot.py",
    "chars": 418,
    "preview": "import matplotlib.pyplot as plt\nimport numpy as np\n\n# 假设你有一个N*1的数组\na = [0.62559]*1000\nb = [0.31532]*50\nc = [0.22537] * 2"
  },
  {
    "path": "transopt/benchmark/HPOOOD/algorithms.py",
    "chars": 87763,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport torch\nimport torch.nn as nn\nimport torch."
  },
  {
    "path": "transopt/benchmark/HPOOOD/collect_results.py",
    "chars": 6408,
    "preview": "import os\nimport numpy as np\nimport json\nimport pandas as pd\nimport re\n\nimport matplotlib.pyplot as plt\n\n\n\nout_put_dir ="
  },
  {
    "path": "transopt/benchmark/HPOOOD/download.py",
    "chars": 10561,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nfrom collections import defaultdict\nfrom torchvi"
  },
  {
    "path": "transopt/benchmark/HPOOOD/fast_data_loader.py",
    "chars": 2156,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport torch\n\nclass _InfiniteSampler(torch.utils"
  },
  {
    "path": "transopt/benchmark/HPOOOD/hparams_registry.py",
    "chars": 9920,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport numpy as np\n\n\ndef _define_hparam(hparams, "
  },
  {
    "path": "transopt/benchmark/HPOOOD/hpoood.py",
    "chars": 16331,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport numpy as np\n\nimport numpy as np\nimport tor"
  },
  {
    "path": "transopt/benchmark/HPOOOD/misc.py",
    "chars": 18887,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n\"\"\"\nThings that don't belong anywhere else\n\"\"\"\n\n"
  },
  {
    "path": "transopt/benchmark/HPOOOD/networks.py",
    "chars": 7157,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport torch\nimport torch.nn as nn\nimport torch."
  },
  {
    "path": "transopt/benchmark/HPOOOD/ooddatasets.py",
    "chars": 21288,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport os\nimport torch\nfrom PIL import Image, Im"
  },
  {
    "path": "transopt/benchmark/HPOOOD/wide_resnet.py",
    "chars": 3242,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n\"\"\"\nFrom https://github.com/meliketoy/wide-resne"
  },
  {
    "path": "transopt/benchmark/RL/LunarlanderBenchmark.py",
    "chars": 11914,
    "preview": "import gym\nimport logging\nimport random\nimport numpy as np\nimport ConfigSpace as CS\nimport matplotlib.pyplot as plt\nfrom"
  },
  {
    "path": "transopt/benchmark/RL/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "transopt/benchmark/__init__.py",
    "chars": 74,
    "preview": "# from transopt.benchmark.instantiate_problems import InstantiateProblems\n"
  },
  {
    "path": "transopt/benchmark/instantiate_problems.py",
    "chars": 1492,
    "preview": "from transopt.agent.registry import problem_registry\n# from transopt.benchmark.problem_base.tab_problem import TabularPr"
  },
  {
    "path": "transopt/benchmark/problem_base/__init__.py",
    "chars": 287,
    "preview": "# from benchmark.problem_base.base import ProblemBase\n# from benchmark.problem_base.non_tab_problem import NonTabularPro"
  },
  {
    "path": "transopt/benchmark/problem_base/base.py",
    "chars": 5665,
    "preview": "\"\"\" Base-class of all benchmarks \"\"\"\n\nimport abc\nimport logging\n\nfrom numpy.random.mtrand import RandomState as RandomSt"
  },
  {
    "path": "transopt/benchmark/problem_base/non_tab_problem.py",
    "chars": 2375,
    "preview": "\"\"\" Base-class of configuration optimization benchmarks \"\"\"\nimport json\nimport logging\nimport os\nfrom pathlib import Pat"
  },
  {
    "path": "transopt/benchmark/problem_base/tab_problem.py",
    "chars": 12034,
    "preview": "import logging\nimport os\nfrom pathlib import Path\nfrom typing import Dict, List, Union\nfrom urllib.parse import urlparse"
  },
  {
    "path": "transopt/benchmark/problem_base/transfer_problem.py",
    "chars": 8336,
    "preview": "import abc\nimport logging\nimport numpy as np\nfrom typing import Union, Dict, List\n\nfrom transopt.benchmark.problem_base."
  },
  {
    "path": "transopt/benchmark/synthetic/MovingPeakBenchmark.py",
    "chars": 8844,
    "preview": "import logging\nimport numpy as np\nimport ConfigSpace as CS\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing im"
  },
  {
    "path": "transopt/benchmark/synthetic/MultiObjBenchmark.py",
    "chars": 3728,
    "preview": "import os\nimport math\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport ConfigSpace as CS\nfrom ty"
  },
  {
    "path": "transopt/benchmark/synthetic/__init__.py",
    "chars": 696,
    "preview": "from transopt.benchmark.synthetic.synthetic_problems import (\n    # SphereOptBenchmark,\n    # RastriginOptBenchmark,\n   "
  },
  {
    "path": "transopt/benchmark/synthetic/synthetic_problems.py",
    "chars": 48720,
    "preview": "# %matplotlib notebook\n\nimport os\nimport math\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom typ"
  },
  {
    "path": "transopt/datamanager/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "transopt/datamanager/database.py",
    "chars": 24016,
    "preview": "import atexit\nimport json\nimport queue\nimport sqlite3\nimport time\nfrom multiprocessing import Event, Manager, Process, Q"
  },
  {
    "path": "transopt/datamanager/lsh.py",
    "chars": 3879,
    "preview": "import numpy as np\nfrom collections import defaultdict\n\nfrom transopt.datamanager.minhash import MinHasher\n\nclass LSHCac"
  },
  {
    "path": "transopt/datamanager/manager.py",
    "chars": 4162,
    "preview": "# import cProfile\n# import pstats\n\nfrom transopt.datamanager.database import Database\nfrom transopt.datamanager.lsh impo"
  },
  {
    "path": "transopt/datamanager/minhash.py",
    "chars": 2851,
    "preview": "from concurrent.futures import ThreadPoolExecutor\n\nimport mmh3\nimport numpy as np\n\n\nclass MinHasher:\n    def __init__(se"
  },
  {
    "path": "transopt/optimizer/MultiObjOptimizer/CauMOpt.py",
    "chars": 13081,
    "preview": "import numpy as np\nimport GPy\nfrom typing import Dict, Union, List\n\nfrom transopt.optimizer.optimizer_base import BOBase"
  },
  {
    "path": "transopt/optimizer/MultiObjOptimizer/IEIPV.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "transopt/optimizer/MultiObjOptimizer/MoeadEGO.py",
    "chars": 6512,
    "preview": "import GPy, GPyOpt\nimport numpy as np\nfrom typing import Dict, Union, List\n\nfrom transopt.optimizer.optimizer_base impor"
  },
  {
    "path": "transopt/optimizer/MultiObjOptimizer/ParEGO.py",
    "chars": 5513,
    "preview": "import numpy as np\nimport GPy\nfrom typing import Dict, Union, List\n\nfrom transopt.optimizer.optimizer_base import BOBase"
  },
  {
    "path": "transopt/optimizer/MultiObjOptimizer/SMSEGO.py",
    "chars": 5165,
    "preview": "import numpy as np\nimport GPy\nfrom typing import Dict, Union, List\nfrom transopt.optimizer.optimizer_base import BOBase\n"
  },
  {
    "path": "transopt/optimizer/MultiObjOptimizer/__init__.py",
    "chars": 255,
    "preview": "from transopt.optimizer.MultiObjOptimizer.ParEGO import ParEGO\nfrom transopt.optimizer.MultiObjOptimizer.SMSEGO import S"
  },
  {
    "path": "transopt/optimizer/SingleObjOptimizer/KrigingOptimizer.py",
    "chars": 9043,
    "preview": "import GPy\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom pymoo.algorithms.soo.nonconvex.ga import GA\nfr"
  },
  {
    "path": "transopt/optimizer/SingleObjOptimizer/LFL.py",
    "chars": 14218,
    "preview": "import numpy as np\nimport GPy\nfrom paramz import ObsAr\nfrom optimizer.acquisition_function.get_acf import get_ACF\nfrom t"
  },
  {
    "path": "transopt/optimizer/SingleObjOptimizer/MetaLearningOptimizer.py",
    "chars": 9952,
    "preview": "import numpy as np\nimport GPy\nimport GPyOpt\nfrom GPy import util\nfrom paramz import ObsAr\n\nfrom GPy.inference.latent_fun"
  },
  {
    "path": "transopt/optimizer/SingleObjOptimizer/MultitaskOptimizer.py",
    "chars": 7569,
    "preview": "import numpy as np\nimport GPy\nfrom typing import Dict, Union, List\nfrom transopt.optimizer.optimizer_base import BOBase\n"
  },
  {
    "path": "transopt/optimizer/SingleObjOptimizer/PROptimizer.py",
    "chars": 9179,
    "preview": "import GPy\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom pymoo.algorithms.soo.nonconvex.ga import GA\nfr"
  },
  {
    "path": "transopt/optimizer/SingleObjOptimizer/RBFNOptimizer.py",
    "chars": 9834,
    "preview": "import GPy\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom pymoo.algorithms.soo.nonconvex.ga import GA\nfr"
  },
  {
    "path": "transopt/optimizer/SingleObjOptimizer/RGPEOptimizer.py",
    "chars": 10187,
    "preview": "import numpy as np\nimport GPy\n\nfrom transopt.utils.serialization import ndarray_to_vectors\nfrom agent.registry import op"
  },
  {
    "path": "transopt/optimizer/SingleObjOptimizer/TPEOptimizer.py",
    "chars": 5157,
    "preview": "import numpy as np\nfrom typing import Dict, List, Union\nfrom transopt.optimizer.optimizer_base import BOBase\nfrom transo"
  },
  {
    "path": "transopt/optimizer/SingleObjOptimizer/VizerOptimizer.py",
    "chars": 7517,
    "preview": "import numpy as np\n\nfrom transopt.utils.serialization import ndarray_to_vectors\nfrom agent.registry import optimizer_reg"
  },
  {
    "path": "transopt/optimizer/SingleObjOptimizer/__init__.py",
    "chars": 593,
    "preview": "from transopt.optimizer.SingleObjOptimizer.KrigingOptimizer import KrigingGA\nfrom transopt.optimizer.SingleObjOptimizer."
  },
  {
    "path": "transopt/optimizer/__init__.py",
    "chars": 325,
    "preview": "# from transopt.optimizer.model.get_model import get_model\n# from transopt.optimizer.sampler.get_sampler import get_samp"
  },
  {
    "path": "transopt/optimizer/acquisition_function/ConformalLCB.py",
    "chars": 1958,
    "preview": "# Copyright (c) 2016, the GPyOpt Authors\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\n\nfrom GPyOpt.acquis"
  },
  {
    "path": "transopt/optimizer/acquisition_function/__init__.py",
    "chars": 1629,
    "preview": "from transopt.optimizer.acquisition_function.sequential import Sequential\n\nfrom transopt.optimizer.acquisition_function."
  },
  {
    "path": "transopt/optimizer/acquisition_function/acf_base.py",
    "chars": 3837,
    "preview": "# Copyright (c) 2016, the GPyOpt Authors\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\nimport numpy as np\n"
  },
  {
    "path": "transopt/optimizer/acquisition_function/ei.py",
    "chars": 2089,
    "preview": "import copy\n\nfrom GPyOpt.acquisitions.base import AcquisitionBase\nfrom GPyOpt.core.task.cost import constant_cost_withGr"
  },
  {
    "path": "transopt/optimizer/acquisition_function/get_acf.py",
    "chars": 898,
    "preview": "\nfrom transopt.agent.registry import acf_registry\n\ndef get_acf(acf_name, **kwargs):\n    \"\"\"Create the optimizer object.\""
  },
  {
    "path": "transopt/optimizer/acquisition_function/lcb.py",
    "chars": 1807,
    "preview": "# Copyright (c) 2016, the GPyOpt Authors\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\n\nfrom GPyOpt.acquis"
  },
  {
    "path": "transopt/optimizer/acquisition_function/model_manage/CMAESBest.py",
    "chars": 3026,
    "preview": "import math\n\nimport numpy as np\nfrom GPyOpt import Design_space\nfrom pymoo.algorithms.soo.nonconvex.cmaes import CMAES\nf"
  },
  {
    "path": "transopt/optimizer/acquisition_function/model_manage/CMAESGeneration.py",
    "chars": 3015,
    "preview": "import math\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom GPyOpt import Design_space\nfrom pymoo.algorit"
  },
  {
    "path": "transopt/optimizer/acquisition_function/model_manage/CMAESPreSelect.py",
    "chars": 3472,
    "preview": "import math\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom GPyOpt import Design_space\nfrom pymoo.algorit"
  },
  {
    "path": "transopt/optimizer/acquisition_function/model_manage/DEBest.py",
    "chars": 2997,
    "preview": "import math\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom GPyOpt import Design_space\nfrom pymoo.algorit"
  },
  {
    "path": "transopt/optimizer/acquisition_function/model_manage/DEGeneration.py",
    "chars": 2988,
    "preview": "import math\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom GPyOpt import Design_space\nfrom pymoo.algorit"
  },
  {
    "path": "transopt/optimizer/acquisition_function/model_manage/DEPreSelect.py",
    "chars": 3445,
    "preview": "import math\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom GPyOpt import Design_space\nfrom pymoo.algorit"
  },
  {
    "path": "transopt/optimizer/acquisition_function/model_manage/GABest.py",
    "chars": 2997,
    "preview": "import math\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom GPyOpt import Design_space\nfrom pymoo.algorit"
  },
  {
    "path": "transopt/optimizer/acquisition_function/model_manage/GAGeneration.py",
    "chars": 2988,
    "preview": "import math\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom GPyOpt import Design_space\nfrom pymoo.algorit"
  },
  {
    "path": "transopt/optimizer/acquisition_function/model_manage/GAPreSelect.py",
    "chars": 3445,
    "preview": "import math\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom GPyOpt import Design_space\nfrom pymoo.algorit"
  },
  {
    "path": "transopt/optimizer/acquisition_function/model_manage/PSOBest.py",
    "chars": 3003,
    "preview": "import math\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom GPyOpt import Design_space\nfrom pymoo.algorit"
  },
  {
    "path": "transopt/optimizer/acquisition_function/model_manage/PSOGeneration.py",
    "chars": 2994,
    "preview": "import math\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom GPyOpt import Design_space\nfrom pymoo.algorit"
  },
  {
    "path": "transopt/optimizer/acquisition_function/model_manage/PSOPreSelect.py",
    "chars": 3451,
    "preview": "import math\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom GPyOpt import Design_space\nfrom pymoo.algorit"
  },
  {
    "path": "transopt/optimizer/acquisition_function/moeadego.py",
    "chars": 1813,
    "preview": "import GPy\nimport numpy as np\nimport scipy.optimize as opt\nfrom scipy.stats import *\nfrom scipy.spatial import distance\n"
  },
  {
    "path": "transopt/optimizer/acquisition_function/pi.py",
    "chars": 2019,
    "preview": "import copy\n\nfrom GPyOpt.core.task.cost import constant_cost_withGradients\nfrom GPyOpt.util.general import get_quantiles"
  },
  {
    "path": "transopt/optimizer/acquisition_function/piei.py",
    "chars": 2353,
    "preview": "import copy\n\nfrom GPyOpt.acquisitions.base import AcquisitionBase\nfrom GPyOpt.core.task.cost import constant_cost_withGr"
  },
  {
    "path": "transopt/optimizer/acquisition_function/sequential.py",
    "chars": 1491,
    "preview": "from GPyOpt.core.evaluators.base import EvaluatorBase\n\n\nclass Sequential(EvaluatorBase):\n    \"\"\"\n    Class for standard "
  },
  {
    "path": "transopt/optimizer/acquisition_function/smsego.py",
    "chars": 1684,
    "preview": "import GPy\nimport numpy as np\nimport scipy.optimize as opt\nfrom scipy.stats import *\nfrom scipy.spatial import distance\n"
  },
  {
    "path": "transopt/optimizer/acquisition_function/taf.py",
    "chars": 2813,
    "preview": "import copy\n\nimport numpy as np\nfrom GPyOpt.core.task.cost import constant_cost_withGradients\nfrom GPyOpt.util.general i"
  },
  {
    "path": "transopt/optimizer/construct_optimizer.py",
    "chars": 4650,
    "preview": "\nfrom transopt.agent.registry import (acf_registry, sampler_registry,\n                                     selector_regi"
  },
  {
    "path": "transopt/optimizer/model/HyperBO.py",
    "chars": 3860,
    "preview": "import random\nimport time\n\nfrom external.hyperbo.basics import definitions as defs\nfrom external.hyperbo.basics import p"
  },
  {
    "path": "transopt/optimizer/model/__init__.py",
    "chars": 537,
    "preview": "from transopt.optimizer.model.gp import GP\nfrom transopt.optimizer.model.pr import PR\nfrom transopt.optimizer.model.rf i"
  },
  {
    "path": "transopt/optimizer/model/bohb.py",
    "chars": 6295,
    "preview": "import copy\n\nimport numpy as np\nimport scipy\nimport statsmodels.api as sm\nimport dask\n\n\nclass KDEMultivariate(sm.nonpara"
  },
  {
    "path": "transopt/optimizer/model/deepkernel.py",
    "chars": 8931,
    "preview": "\n\"\"\"\nThis FSBO implementation is based on the original implementation from Hadi Samer Jomaa\nfor his work on \"Transfer Le"
  },
  {
    "path": "transopt/optimizer/model/dyhpo.py",
    "chars": 15169,
    "preview": "import logging\nimport os\nfrom copy import deepcopy\nfrom typing import Dict, Tuple\n\nimport gpytorch\nimport numpy as np\nim"
  },
  {
    "path": "transopt/optimizer/model/get_model.py",
    "chars": 393,
    "preview": "from transopt.agent.registry import model_registry\n\n\n\ndef get_model(model_name, **kwargs):\n    \"\"\"Create the optimizer o"
  }
]

// ... and 219 more files (download for full content)

About this extraction

This page contains the full source code of the COLA-Laboratory/TransOPT GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 419 files (1.8 MB), approximately 460.3k tokens, and a symbol index with 2302 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub-repository-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!