Copy disabled (too large)
Download .txt
Showing preview only (11,705K chars total). Download the full file to get everything.
Repository: BlueBrain/BluePyOpt
Branch: master
Commit: 87325945d120
Files: 363
Total size: 30.7 MB
Directory structure:
gitextract_wgjz66lh/
├── .coveragerc
├── .gitattributes
├── .github/
│ └── workflows/
│ ├── build.yml
│ ├── keep-alive.yml
│ ├── mirror-ebrains.yml
│ └── test.yml
├── .gitignore
├── .readthedocs.yaml
├── .zenodo.json
├── AUTHORS.txt
├── COPYING
├── COPYING.lesser
├── Dockerfile
├── LICENSE.txt
├── MANIFEST.in
├── Makefile
├── README.rst
├── bluepyopt/
│ ├── __init__.py
│ ├── api.py
│ ├── deapext/
│ │ ├── CMA_MO.py
│ │ ├── CMA_SO.py
│ │ ├── __init__.py
│ │ ├── algorithms.py
│ │ ├── hype.py
│ │ ├── optimisations.py
│ │ ├── optimisationsCMA.py
│ │ ├── stoppingCriteria.py
│ │ ├── tools/
│ │ │ ├── __init__.py
│ │ │ └── selIBEA.py
│ │ └── utils.py
│ ├── ephys/
│ │ ├── __init__.py
│ │ ├── acc.py
│ │ ├── base.py
│ │ ├── create_acc.py
│ │ ├── create_hoc.py
│ │ ├── efeatures.py
│ │ ├── evaluators.py
│ │ ├── examples/
│ │ │ ├── __init__.py
│ │ │ └── simplecell/
│ │ │ ├── __init__.py
│ │ │ ├── simple.swc
│ │ │ └── simplecell.py
│ │ ├── extra_features_utils.py
│ │ ├── locations.py
│ │ ├── mechanisms.py
│ │ ├── models.py
│ │ ├── morphologies.py
│ │ ├── objectives.py
│ │ ├── objectivescalculators.py
│ │ ├── parameters.py
│ │ ├── parameterscalers/
│ │ │ ├── __init__.py
│ │ │ ├── acc_iexpr.py
│ │ │ └── parameterscalers.py
│ │ ├── protocols.py
│ │ ├── recordings.py
│ │ ├── responses.py
│ │ ├── serializer.py
│ │ ├── simulators.py
│ │ ├── static/
│ │ │ └── arbor_mechanisms.json
│ │ ├── stimuli.py
│ │ └── templates/
│ │ ├── acc/
│ │ │ ├── _json_template.jinja2
│ │ │ ├── decor_acc_template.jinja2
│ │ │ └── label_dict_acc_template.jinja2
│ │ └── cell_template.jinja2
│ ├── evaluators.py
│ ├── ipyp/
│ │ ├── __init__.py
│ │ └── bpopt_tasksdb.py
│ ├── neuroml/
│ │ ├── NeuroML2_mechanisms/
│ │ │ ├── Ca.channel.nml
│ │ │ ├── Ca_HVA.channel.nml
│ │ │ ├── Ca_LVAst.channel.nml
│ │ │ ├── Ih.channel.nml
│ │ │ ├── Im.channel.nml
│ │ │ ├── K_Pst.channel.nml
│ │ │ ├── K_Tst.channel.nml
│ │ │ ├── KdShu2007.channel.nml
│ │ │ ├── NaTa_t.channel.nml
│ │ │ ├── NaTs2_t.channel.nml
│ │ │ ├── Nap_Et2.channel.nml
│ │ │ ├── SK_E2.channel.nml
│ │ │ ├── SKv3_1.channel.nml
│ │ │ ├── StochKv_deterministic.channel.nml
│ │ │ ├── baseCaDynamics_E2_NML2.nml
│ │ │ └── pas.channel.nml
│ │ ├── __init__.py
│ │ ├── biophys.py
│ │ ├── cell.py
│ │ ├── morphology.py
│ │ └── simulation.py
│ ├── objectives.py
│ ├── optimisations.py
│ ├── parameters.py
│ ├── stoppingCriteria.py
│ ├── tests/
│ │ ├── .gitignore
│ │ ├── __init__.py
│ │ ├── disable_simplecell_scoop.py
│ │ ├── expected_results.json
│ │ ├── test_bluepyopt.py
│ │ ├── test_deapext/
│ │ │ ├── __init__.py
│ │ │ ├── deapext_test_utils.py
│ │ │ ├── test_algorithms.py
│ │ │ ├── test_hype.py
│ │ │ ├── test_optimisations.py
│ │ │ ├── test_optimisationsCMA.py
│ │ │ ├── test_selIBEA.py
│ │ │ ├── test_stoppingCriteria.py
│ │ │ └── test_utils.py
│ │ ├── test_ephys/
│ │ │ ├── __init__.py
│ │ │ ├── test_acc.py
│ │ │ ├── test_create_acc.py
│ │ │ ├── test_create_hoc.py
│ │ │ ├── test_evaluators.py
│ │ │ ├── test_extra_features_utils.py
│ │ │ ├── test_features.py
│ │ │ ├── test_init.py
│ │ │ ├── test_locations.py
│ │ │ ├── test_mechanisms.py
│ │ │ ├── test_models.py
│ │ │ ├── test_morphologies.py
│ │ │ ├── test_objectives.py
│ │ │ ├── test_parameters.py
│ │ │ ├── test_parameterscalers.py
│ │ │ ├── test_protocols.py
│ │ │ ├── test_recordings.py
│ │ │ ├── test_serializer.py
│ │ │ ├── test_simulators.py
│ │ │ ├── test_stimuli.py
│ │ │ ├── testdata/
│ │ │ │ ├── TimeVoltageResponse.csv
│ │ │ │ ├── acc/
│ │ │ │ │ ├── CCell/
│ │ │ │ │ │ ├── CCell.json
│ │ │ │ │ │ ├── CCell_decor.acc
│ │ │ │ │ │ ├── CCell_label_dict.acc
│ │ │ │ │ │ └── simple_axon_replacement.acc
│ │ │ │ │ ├── expsyn/
│ │ │ │ │ │ ├── simple.swc
│ │ │ │ │ │ ├── simple_cell.json
│ │ │ │ │ │ ├── simple_cell_decor.acc
│ │ │ │ │ │ └── simple_cell_label_dict.acc
│ │ │ │ │ ├── l5pc/
│ │ │ │ │ │ ├── C060114A7.asc
│ │ │ │ │ │ ├── C060114A7_axon_replacement.acc
│ │ │ │ │ │ ├── C060114A7_modified.acc
│ │ │ │ │ │ ├── l5pc.json
│ │ │ │ │ │ ├── l5pc_decor.acc
│ │ │ │ │ │ └── l5pc_label_dict.acc
│ │ │ │ │ ├── l5pc_py37/
│ │ │ │ │ │ └── l5pc_decor.acc
│ │ │ │ │ ├── simplecell/
│ │ │ │ │ │ ├── simple.swc
│ │ │ │ │ │ ├── simple_axon_replacement.acc
│ │ │ │ │ │ ├── simple_cell.json
│ │ │ │ │ │ ├── simple_cell_decor.acc
│ │ │ │ │ │ ├── simple_cell_label_dict.acc
│ │ │ │ │ │ └── simple_modified.acc
│ │ │ │ │ └── templates/
│ │ │ │ │ ├── cell_json_template.jinja2
│ │ │ │ │ ├── decor_acc_template.jinja2
│ │ │ │ │ └── label_dict_acc_template.jinja2
│ │ │ │ ├── apic.swc
│ │ │ │ ├── lfpy_soma_time.npy
│ │ │ │ ├── lfpy_soma_voltage.npy
│ │ │ │ ├── lfpy_time.npy
│ │ │ │ ├── lfpy_voltage.npy
│ │ │ │ ├── simple.swc
│ │ │ │ ├── simple.wrong
│ │ │ │ ├── simple_ax1.swc
│ │ │ │ ├── simple_ax2.asc
│ │ │ │ ├── simple_ax2.swc
│ │ │ │ └── test.jinja2
│ │ │ ├── testmodels/
│ │ │ │ ├── __init__.py
│ │ │ │ └── dummycells.py
│ │ │ └── utils.py
│ │ ├── test_evaluators.py
│ │ ├── test_l5pc.py
│ │ ├── test_lfpy.py
│ │ ├── test_neuroml_fcts.py
│ │ ├── test_parameters.py
│ │ ├── test_simplecell.py
│ │ ├── test_stochkv.py
│ │ ├── test_tools.py
│ │ └── testdata/
│ │ └── l5pc_validate_neuron_arbor/
│ │ └── param_values.json
│ └── tools.py
├── cloud-config/
│ ├── README.md
│ ├── config/
│ │ ├── amazon/
│ │ │ ├── README.md
│ │ │ ├── ansible.cfg
│ │ │ ├── create_instance.yaml
│ │ │ ├── gather_config.py
│ │ │ ├── site.yaml
│ │ │ └── vars.yaml
│ │ ├── cluster-user/
│ │ │ ├── README.md
│ │ │ ├── ansible.cfg
│ │ │ ├── hosts
│ │ │ ├── site.yaml
│ │ │ └── vars.yaml
│ │ └── vagrant/
│ │ ├── README.md
│ │ ├── Vagrantfile
│ │ ├── ansible.cfg
│ │ ├── hosts
│ │ ├── site.yaml
│ │ └── vars.yaml
│ └── roles/
│ ├── base/
│ │ └── tasks/
│ │ └── main.yaml
│ ├── deap/
│ │ └── tasks/
│ │ └── main.yaml
│ ├── granule-example/
│ │ └── tasks/
│ │ └── main.yaml
│ ├── neuron/
│ │ └── tasks/
│ │ ├── main.yaml
│ │ └── python27.yaml
│ └── scoop-master/
│ └── tasks/
│ └── main.yaml
├── codecov.yml
├── docs/
│ ├── .gitignore
│ ├── Makefile
│ └── source/
│ ├── .gitignore
│ ├── _templates/
│ │ └── module.rst
│ ├── api.rst
│ ├── conf.py
│ ├── deapext.rst
│ ├── ephys.rst
│ ├── index.rst
│ └── optimisations.rst
├── examples/
│ ├── BluePyOpt-ipyparallel.md
│ ├── README.md
│ ├── __init__.py
│ ├── cma_strategy/
│ │ └── cma.ipynb
│ ├── expsyn/
│ │ ├── .gitignore
│ │ ├── ExpSyn.ipynb
│ │ ├── ExpSyn_arbor.ipynb
│ │ ├── expsyn.py
│ │ ├── generate_acc.py
│ │ └── simple.swc
│ ├── graupnerbrunelstdp/
│ │ ├── checkpoints/
│ │ │ └── .gitignore
│ │ ├── figures/
│ │ │ └── .gitignore
│ │ ├── gbevaluator.py
│ │ ├── graupnerbrunelstdp.ipynb
│ │ ├── run_fit.py
│ │ ├── stdputil.py
│ │ └── test_stdputil.py
│ ├── l5pc/
│ │ ├── .gitignore
│ │ ├── L5PC.ipynb
│ │ ├── L5PC_arbor.ipynb
│ │ ├── benchmark/
│ │ │ ├── get_stats.py
│ │ │ ├── l5pc_benchmark.sbatch
│ │ │ ├── logs/
│ │ │ │ └── .gitignore
│ │ │ ├── run_benchmark.sh
│ │ │ ├── start.sh
│ │ │ └── task_stats.py
│ │ ├── cADpyr_76.hoc
│ │ ├── checkpoints/
│ │ │ └── .gitignore
│ │ ├── config/
│ │ │ ├── features.json
│ │ │ ├── fixed_params.json
│ │ │ ├── mechanisms.json
│ │ │ ├── parameters.json
│ │ │ ├── params.json
│ │ │ └── protocols.json
│ │ ├── convert_noise_exp.py
│ │ ├── convert_params.py
│ │ ├── create_tables.py
│ │ ├── exp_data/
│ │ │ ├── .gitignore
│ │ │ └── noise_i.txt
│ │ ├── figures/
│ │ │ └── .gitignore
│ │ ├── generate_acc.py
│ │ ├── generate_hoc.py
│ │ ├── hocmodel.py
│ │ ├── l5pc_analysis.py
│ │ ├── l5pc_evaluator.py
│ │ ├── l5pc_model.py
│ │ ├── l5pc_validate_neuron_arbor.ipynb
│ │ ├── l5pc_validate_neuron_arbor_pm.py
│ │ ├── mechanisms/
│ │ │ ├── CaDynamics_E2.mod
│ │ │ ├── Ca_HVA.mod
│ │ │ ├── Ca_LVAst.mod
│ │ │ ├── Ih.mod
│ │ │ ├── Im.mod
│ │ │ ├── K_Pst.mod
│ │ │ ├── K_Tst.mod
│ │ │ ├── LICENSE
│ │ │ ├── NaTa_t.mod
│ │ │ ├── NaTs2_t.mod
│ │ │ ├── Nap_Et2.mod
│ │ │ ├── SK_E2.mod
│ │ │ ├── SKv3_1.mod
│ │ │ └── dummy.inc
│ │ ├── morphology/
│ │ │ ├── C060114A7.asc
│ │ │ └── LICENSE
│ │ ├── nsg/
│ │ │ ├── .gitignore
│ │ │ ├── Makefile
│ │ │ └── init.py
│ │ ├── opt_l5pc.py
│ │ ├── opt_l5pc.sh
│ │ ├── tables/
│ │ │ └── .gitignore
│ │ └── tasks2dataframe.py
│ ├── l5pc_lfpy/
│ │ ├── L5PC_LFPy.ipynb
│ │ ├── __init__.py
│ │ ├── extra_features.json
│ │ ├── generate_extra_features.py
│ │ ├── l5pc_lfpy_evaluator.py
│ │ └── l5pc_lfpy_model.py
│ ├── metaparameters/
│ │ ├── .gitignore
│ │ ├── metaparameters.ipynb
│ │ └── twocompartment.swc
│ ├── neuroml/
│ │ └── neuroml.ipynb
│ ├── simplecell/
│ │ ├── .gitignore
│ │ ├── checkpoints/
│ │ │ └── .gitignore
│ │ ├── figures/
│ │ │ └── .gitignore
│ │ ├── generate_acc.py
│ │ ├── generate_hoc.py
│ │ ├── responses.pkl
│ │ ├── simple.swc
│ │ ├── simplecell-paperfig.ipynb
│ │ ├── simplecell.ipynb
│ │ ├── simplecell_arbor.ipynb
│ │ └── simplecell_model.py
│ ├── stochkv/
│ │ ├── .gitignore
│ │ ├── mechanisms/
│ │ │ ├── StochKv.mod
│ │ │ ├── StochKv3.mod
│ │ │ └── dummy.inc
│ │ ├── morphology/
│ │ │ └── simple.swc
│ │ ├── stochkv3cell.hoc
│ │ ├── stochkv3cell.py
│ │ ├── stochkv3cell_det.hoc
│ │ ├── stochkvcell.hoc
│ │ ├── stochkvcell.py
│ │ └── stochkvcell_det.hoc
│ ├── thalamocortical-cell/
│ │ ├── CellEvalSetup/
│ │ │ ├── __init__.py
│ │ │ ├── evaluator.py
│ │ │ ├── protocols.py
│ │ │ ├── template.py
│ │ │ └── tools.py
│ │ ├── LICENSE.txt
│ │ ├── checkpoints/
│ │ │ └── checkpoint.pkl
│ │ ├── config/
│ │ │ ├── features/
│ │ │ │ ├── cAD_ltb.json
│ │ │ │ └── cNAD_ltb.json
│ │ │ ├── params/
│ │ │ │ └── TC.json
│ │ │ ├── protocols/
│ │ │ │ ├── cAD_ltb.json
│ │ │ │ └── cNAD_ltb.json
│ │ │ └── recipes.json
│ │ ├── mechanisms/
│ │ │ ├── SK_E2.mod
│ │ │ ├── TC_HH.mod
│ │ │ ├── TC_ITGHK_Des98.mod
│ │ │ ├── TC_Ih_Bud97.mod
│ │ │ ├── TC_Nap_Et2.mod
│ │ │ ├── TC_cadecay.mod
│ │ │ ├── TC_iA.mod
│ │ │ └── TC_iL.mod
│ │ ├── morphologies/
│ │ │ ├── jy160728_A_idA.asc
│ │ │ └── jy170517_A_idA.asc
│ │ ├── results/
│ │ │ ├── cAD_ltb_params.csv
│ │ │ └── cNAD_ltb_params.csv
│ │ └── thalamocortical-cell_opt.ipynb
│ └── tsodyksmarkramstp/
│ ├── AUTHORS.txt
│ ├── README.md
│ ├── amps.pkl
│ ├── tmevaluator.py
│ ├── tmevaluator_multiplefreqs.py
│ ├── tmodeint.py
│ ├── tmodesolve.py
│ ├── trace.pkl
│ ├── tsodyksmarkramstp.ipynb
│ └── tsodyksmarkramstp_multiplefreqs.ipynb
├── misc/
│ ├── github_wiki/
│ │ ├── bibtex/
│ │ │ ├── mentions_BPO.bib
│ │ │ ├── mentions_BPO_extra.bib
│ │ │ ├── poster_uses_BPO.bib
│ │ │ ├── thesis_mentions_BPO.bib
│ │ │ ├── thesis_uses_BPO.bib
│ │ │ ├── uses_BPO.bib
│ │ │ └── uses_BPO_extra.bib
│ │ └── creates_publication_list_markdown.py
│ └── pytest_migration/
│ └── convert_pytest.sh
├── package.json
├── pyproject.toml
├── pytest.ini
├── requirements.txt
├── requirements_docs.txt
└── tox.ini
================================================
FILE CONTENTS
================================================
================================================
FILE: .coveragerc
================================================
[run]
omit = */tests/*,bluepyopt/_version.py
[report]
omit=bluepyopt/_version.py
================================================
FILE: .gitattributes
================================================
bluepyopt/_version.py export-subst
================================================
FILE: .github/workflows/build.yml
================================================
name: Build
on:
push:
branches:
- master
tags:
- '[0-9]+.[0-9]+.[0-9]+'
jobs:
call-test-workflow:
uses: BlueBrain/BluePyOpt/.github/workflows/test.yml@master
build-tag-n-publish:
name: Build, tag and publish on PyPI
runs-on: ubuntu-latest
needs: call-test-workflow
permissions:
contents: write
steps:
- uses: actions/checkout@v3
- name: Set up Python 3.10
uses: actions/setup-python@v4
with:
python-version: "3.10"
- name: Bump version and push tag
uses: anothrNick/github-tag-action@1.64.0
if: ${{ !startsWith(github.ref, 'refs/tags/') }}
id: tag
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
WITH_V: false
DEFAULT_BUMP: patch
- name: Build a source tarball and wheel
run: |
pip install build
python -m build
- name: Get and store tag from 'Bump version and push tag' step
if: ${{ !startsWith(github.ref, 'refs/tags/') }}
run: echo "TAG_NAME=${{ steps.tag.outputs.new_tag }}" >> $GITHUB_ENV
- name: Get and store tag from triggered tag push
if: ${{ startsWith(github.ref, 'refs/tags/') }}
run: echo "TAG_NAME=${{ github.ref_name }}" >> $GITHUB_ENV
- name: Release
uses: softprops/action-gh-release@v1
with:
tag_name: ${{ env.TAG_NAME }}
name: ${{ env.TAG_NAME }}
generate_release_notes: true
- name: Publish package to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.PYPI_PASSWORD }}
================================================
FILE: .github/workflows/keep-alive.yml
================================================
name: Keep-alive
on:
schedule:
# Runs every sunday at 3 a.m.
- cron: '0 3 * * SUN'
jobs:
call-test-workflow:
uses: BlueBrain/BluePyOpt/.github/workflows/test.yml@master
keep-workflow-alive:
name: Keep workflow alive
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
ref: master
- name: Get date from 50 days ago
run: |
datethen=`date -d "-50 days" --utc +%FT%TZ`
echo "datelimit=$datethen" >> $GITHUB_ENV
- name: setup git config
if: github.event.repository.pushed_at <= env.datelimit
run: |
# setup the username and email.
git config user.name "Github Actions Keepalive Bot"
git config user.email "<>"
- name: commit IF last commit is older than 50 days
if: github.event.repository.pushed_at <= env.datelimit
run: |
git commit -m "Empty commit to keep the GitHub workflows alive" --allow-empty
git push origin master
================================================
FILE: .github/workflows/mirror-ebrains.yml
================================================
name: Mirror to Ebrains
on:
push:
branches: [ master ]
jobs:
to_ebrains:
runs-on: ubuntu-latest
steps:
- name: syncmaster
uses: wei/git-sync@v3
with:
source_repo: "BlueBrain/BluePyOpt"
source_branch: "master"
destination_repo: "https://ghpusher:${{ secrets.EBRAINS_GITLAB_ACCESS_TOKEN }}@gitlab.ebrains.eu/BlueBrain/bluepyopt.git"
destination_branch: "master"
- name: synctags
uses: wei/git-sync@v3
with:
source_repo: "BlueBrain/BluePyOpt"
source_branch: "refs/tags/*"
destination_repo: "https://ghpusher:${{ secrets.EBRAINS_GITLAB_ACCESS_TOKEN }}@gitlab.ebrains.eu/BlueBrain/bluepyopt.git"
destination_branch: "refs/tags/*"
================================================
FILE: .github/workflows/test.yml
================================================
name: Test
on:
pull_request:
# allows this workflow to be reusable (e.g. by the build workflow)
workflow_call:
jobs:
test:
name: Test for python ${{ matrix.python-version }} on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest]
python-version: ["3.9", "3.10", "3.11", "3.12"]
include:
- os: macos-12
python-version: "3.10"
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip setuptools
pip install tox tox-gh-actions
- name: Run tox
run: tox
- name: "Upload coverage to Codecov"
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: false
================================================
FILE: .gitignore
================================================
*.pyc
*.swp
x86_64
/bluepyopt.egg-info/
/build/
/dist/
.DS_Store
/.tox
.ipynb_checkpoints
/.python-version
/cov_reports
.coverage
coverage.xml
.idea/
================================================
FILE: .readthedocs.yaml
================================================
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
# Required
version: 2
sphinx:
configuration: docs/source/conf.py
fail_on_warning: true
python:
install:
- method: pip
path: .
- requirements: requirements_docs.txt
build:
os: ubuntu-22.04
tools:
python: "3.10"
================================================
FILE: .zenodo.json
================================================
{
"title" : "BluePyOpt",
"license": "LGPL-3.0",
"upload_type": "software",
"description": "The Blue Brain Python Optimisation Library (BluePyOpt) is an extensible framework for data-driven model parameter optimisation that wraps and standardises several existing open-source tools. It simplifies the task of creating and sharing these optimisations, and the associated techniques and knowledge. This is achieved by abstracting the optimisation and evaluation tasks into various reusable and flexible discrete elements according to established best-practices. Further, BluePyOpt provides methods for setting up both small- and large-scale optimisations on a variety of platforms, ranging from laptops to Linux clusters and cloud-based compute infrastructures.",
"creators": [
{
"affiliation": "Blue Brain Project, EPFL",
"name": "Van Geit, Werner",
"orcid": "0000-0002-2915-720X"
},
{
"affiliation": "Blue Brain Project, EPFL",
"name": "Gevaert, Michael",
"orcid": "0000-0002-7547-3297"
},
{
"affiliation": "Blue Brain Project, EPFL",
"name": "Damart, Tanguy",
"orcid": "0000-0003-2175-7304"
},
{
"affiliation": "Blue Brain Project, EPFL",
"name": "Rössert, Christian",
"orcid": "0000-0002-4839-2424"
},
{
"affiliation": "Blue Brain Project, EPFL",
"name": "Courcol, Jean-Denis",
"orcid": "0000-0002-9351-1461"
},
{
"affiliation": "Blue Brain Project, EPFL",
"name": "Chindemi, Guiseppe",
"orcid": "0000-0001-6872-2366"
},
{
"affiliation": "Blue Brain Project, EPFL",
"name": "Jaquier, Aurélien",
"orcid": "0000-0001-6202-6175"
},
{
"affiliation": "Blue Brain Project, EPFL",
"name": "Muller, Eilif",
"orcid": "0000-0003-4309-8266"
}
]
}
================================================
FILE: AUTHORS.txt
================================================
Werner Van Geit @ BBP
Christian Roessert @ BBP
Mike Gevaert @ BBP
Jean-Denis Courcol @ BBP
Giuseppe Chindemi @ BBP
Tanguy Damart @ BBP
Elisabetta Iavarone @ BBP
Anil Tuncel @ BBP
Aurelien Jaquier @ BBP
================================================
FILE: COPYING
================================================
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
================================================
FILE: COPYING.lesser
================================================
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
================================================
FILE: Dockerfile
================================================
# Copyright (c) 2016-2022, EPFL/Blue Brain Project
#
# This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License version 3.0 as published
# by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Base image providing a Jupyter/Binder environment.
FROM andrewosh/binder-base

# MAINTAINER is deprecated since Docker 1.13; LABEL is the supported form.
LABEL maintainer="Werner Van Geit"

USER root

# Run update and install in a single layer so the package index can never be
# stale relative to the install step, then drop the apt cache to keep the
# image small.
RUN apt-get update && \
    apt-get install -y wget libx11-6 python-dev git build-essential libncurses-dev && \
    rm -rf /var/lib/apt/lists/*

# Bootstrap pip for the system Python; remove the installer afterwards.
RUN wget https://bootstrap.pypa.io/get-pip.py && \
    python get-pip.py && \
    rm get-pip.py

# Install NEURON 7.4 from the upstream Debian package, then remove the
# package file so it does not bloat the layer.
RUN wget http://www.neuron.yale.edu/ftp/neuron/versions/v7.4/nrn-7.4.x86_64.deb && \
    dpkg -i nrn-7.4.x86_64.deb && \
    rm nrn-7.4.x86_64.deb

RUN pip install bluepyopt

# Make NEURON's Python bindings importable.
ENV PYTHONPATH /usr/local/nrn/lib/python:$PYTHONPATH
================================================
FILE: LICENSE.txt
================================================
BluePyOpt - Bluebrain Python Optimisation Library
BluePyOpt is licensed under the LGPL, unless noted otherwise, e.g., for external
dependencies. See files COPYING and COPYING.lesser for the full license.
Examples and tests are BSD-licensed.
External dependencies are either LGPL or BSD-licensed.
See file ACKNOWLEDGEMENTS.txt and AUTHORS.txt for further details.
Copyright (c) Blue Brain Project/EPFL 2016-2022.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY;
without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
================================================
FILE: MANIFEST.in
================================================
include versioneer.py
include bluepyopt/_version.py
include bluepyopt/ephys/static/arbor_mechanisms.json
include bluepyopt/ephys/templates/cell_template.jinja2
include bluepyopt/ephys/templates/acc/_json_template.jinja2
include bluepyopt/ephys/templates/acc/decor_acc_template.jinja2
include bluepyopt/ephys/templates/acc/label_dict_acc_template.jinja2
include LICENSE.txt
include AUTHORS.txt
include COPYING
include COPYING.lesser
recursive-include bluepyopt/tests *
================================================
FILE: Makefile
================================================
TEST_REQUIREMENTS=nose coverage mock
all: install
install:
pip install -q . --upgrade
doc: install
pip install -q sphinx sphinx-autobuild sphinx_rtd_theme
cd docs; $(MAKE) clean; $(MAKE) html
docopen: doc
open docs/build/html/index.html
docpdf: install
pip install sphinx sphinx-autobuild
cd docs; $(MAKE) clean; $(MAKE) latexpdf
l5pc_nbconvert: jupyter
cd examples/l5pc && \
jupyter nbconvert --to python L5PC.ipynb && \
sed '/get_ipython/d;/plt\./d;/plot_responses/d;/import matplotlib/d;/neurom/d;/axes/d;/fig/d;/for index/d' L5PC.py >L5PC.tmp && \
mv L5PC.tmp L5PC.py && \
python l5pc_validate_neuron_arbor_pm.py --prepare-only --regions somatic --param-values ../../bluepyopt/tests/testdata/l5pc_validate_neuron_arbor/param_values.json && \
jupyter nbconvert --to python l5pc_validate_neuron_arbor_somatic.ipynb && \
sed '/get_ipython/d;/plt\./d;/import matplotlib/d;/from IPython.display/d;/multiprocessing/d;s/pool.map/map/g;s/# test_l5pc: insert //g;/# test_l5pc: skip/d' l5pc_validate_neuron_arbor_somatic.py >l5pc_validate_neuron_arbor_somatic.tmp && \
mv l5pc_validate_neuron_arbor_somatic.tmp l5pc_validate_neuron_arbor_somatic.py
l5pc_nrnivmodl:
cd examples/l5pc && nrnivmodl mechanisms
l5pc_zip:
cd examples/l5pc && \
zip -qr l5_config.zip config/ morphology/ mechanisms/ l5pc_model.py l5pc_evaluator.py checkpoints/checkpoint.pkl
l5pc_prepare: l5pc_nbconvert l5pc_nrnivmodl
stochkv_prepare:
cd examples/stochkv && ls mechanisms && nrnivmodl mechanisms
sc_prepare: jupyter
cd examples/simplecell && \
jupyter nbconvert --to python simplecell.ipynb && \
sed '/get_ipython/d;/plt\./d;/plot_responses/d;/import matplotlib/d' simplecell.py >simplecell.tmp && \
mv simplecell.tmp simplecell.py && \
jupyter nbconvert --to python simplecell_arbor.ipynb && \
sed '/get_ipython/d;/plt\./d;/plot_responses/d;/import matplotlib/d' simplecell_arbor.py >simplecell_arbor.tmp && \
mv simplecell_arbor.tmp simplecell_arbor.py
meta_prepare: jupyter
cd examples/metaparameters && \
jupyter nbconvert --to python metaparameters.ipynb && \
sed '/get_ipython/d;/plt\./d;/plot_responses/d;/import matplotlib/d' metaparameters.py >metaparameters.tmp && \
mv metaparameters.tmp metaparameters.py
coverage_unit: unit
cd bluepyopt/tests; coverage html -d coverage_html; open coverage_html/index.html
coverage_test: test
cd bluepyopt/tests; coverage html -d coverage_html; open coverage_html/index.html
jupyter:
pip install jupyter
pip install ipython --upgrade
pip install papermill
pip install scipy
install_test_requirements:
pip install -q $(TEST_REQUIREMENTS) --upgrade
test: clean unit functional
unit: install install_test_requirements
cd bluepyopt/tests; nosetests -a 'unit' -s -v -x --with-coverage --cover-xml \
--cover-package bluepyopt;
functional: install install_test_requirements stochkv_prepare l5pc_prepare sc_prepare
cd bluepyopt/tests; nosetests -a '!unit' -s -v -x --with-coverage --cover-xml \
--cover-package bluepyopt;
pypi: test
pip install twine --upgrade
rm -rf dist
python setup.py sdist bdist
twine upload dist/*
example: install
cd examples/simplecell && \
python ./opt_simplecell.py
clean:
rm -rf build
rm -rf docs/build
rm -rf bluepyopt/tests/.coverage
rm -rf bluepyopt/tests/coverage.xml
rm -rf bluepyopt/tests/coverage_html
rm -rf examples/l5pc/L5PC.py
rm -rf examples/l5pc/l5pc_validate_neuron_arbor_somatic.ipynb
rm -rf examples/l5pc/l5pc_validate_neuron_arbor_somatic.py
rm -rf examples/l5pc/x86_64
rm -rf examples/stochkv/x86_64
rm -rf x86_64
rm -rf .coverage
rm -rf coverage.xml
rm -rf channels
rm -rf LEMS_l5pc.xml
rm -rf LEMS_l5pc_nrn.py
rm -rf l5pc.Pop_l5pc_0_0.v.dat
rm -rf time.dat
rm -rf l5pc.hoc
rm -rf l5pc.net.nml
rm -rf l5pc_0_0.cell.nml
rm -rf l5pc_0_0.hoc
rm -rf loadcell.hoc
rm -rf *.mod
find . -name "*.pyc" -exec rm -rf {} \;
# Build the NEURON mechanisms, then launch/continue/analyse the L5PC
# optimisation example.
#
# NOTE: the "@" echo-suppression prefix is only meaningful at the very start
# of a recipe line. The previous recipes used "@nrnivmodl mechanisms" in the
# middle of a backslash-continued shell command, where the "@" is passed
# verbatim to the shell and fails with "command not found" — it is removed.
l5pc_start: install
	cd examples/l5pc && \
	nrnivmodl mechanisms && \
	python ./opt_l5pc.py --start
l5pc_cont: install
	cd examples/l5pc && \
	nrnivmodl mechanisms && \
	python ./opt_l5pc.py --continue_cp
l5pc_analyse: install
	cd examples/l5pc && \
	nrnivmodl mechanisms && \
	python ./opt_l5pc.py --analyse
push: clean test
git push
git push --tags
check_codecov:
cat codecov.yml | curl --data-binary @- https://codecov.io/validate
toxbinlinks:
cd ${TOX_ENVBINDIR}; find $(TOX_NRNBINDIR) -type f -exec ln -sf \{\} . \;
================================================
FILE: README.rst
================================================
.. warning::
The Blue Brain Project concluded in December 2024, so development has ceased under the BlueBrain GitHub organization.
Future development will take place at: https://github.com/openbraininstitute/BluePyOpt
|banner|
BluePyOpt
=========
+----------------+------------+
| Latest Release | |pypi| |
+----------------+------------+
| Documentation | |docs| |
+----------------+------------+
| License | |license| |
+----------------+------------+
| Build Status | |build| |
+----------------+------------+
| Coverage | |coverage| |
+----------------+------------+
| Gitter | |gitter| |
+----------------+------------+
| Zenodo | |zenodo| |
+----------------+------------+
Introduction
============
The Blue Brain Python Optimisation Library (BluePyOpt) is an extensible
framework for data-driven model parameter optimisation that wraps and
standardises several existing open-source tools.
It simplifies the task of creating and sharing these optimisations,
and the associated techniques and knowledge.
This is achieved by abstracting the optimisation and evaluation tasks
into various reusable and flexible discrete elements according to established
best-practices.
Further, BluePyOpt provides methods for setting up both small- and large-scale
optimisations on a variety of platforms,
ranging from laptops to Linux clusters and cloud-based compute infrastructures.
Citation
========
When you use the BluePyOpt software or method for your research, we ask you to cite the following publication (**this includes poster presentations**):
`Van Geit W, Gevaert M, Chindemi G, Rössert C, Courcol J, Muller EB, Schürmann F, Segev I and Markram H (2016). BluePyOpt: Leveraging open source software and cloud infrastructure to optimise model parameters in neuroscience. Front. Neuroinform. 10:17. doi: 10.3389/fninf.2016.00017 <http://journal.frontiersin.org/article/10.3389/fninf.2016.00017>`_.
.. code-block::
@ARTICLE{bluepyopt,
AUTHOR={Van Geit, Werner and Gevaert, Michael and Chindemi, Giuseppe and Rössert, Christian and Courcol, Jean-Denis and Muller, Eilif Benjamin and Schürmann, Felix and Segev, Idan and Markram, Henry},
TITLE={BluePyOpt: Leveraging open source software and cloud infrastructure to optimise model parameters in neuroscience},
JOURNAL={Frontiers in Neuroinformatics},
VOLUME={10},
YEAR={2016},
NUMBER={17},
URL={http://www.frontiersin.org/neuroinformatics/10.3389/fninf.2016.00017/abstract},
DOI={10.3389/fninf.2016.00017},
ISSN={1662-5196}
}
Publications that use or mention BluePyOpt
==========================================
The list of publications that use or mention BluePyOpt can be found on `the github wiki page <https://github.com/BlueBrain/BluePyOpt/wiki/Publications-that-use-or-mention-BluePyOpt>`_.
Support
=======
We are providing support using a chat channel on `Gitter <https://gitter.im/BlueBrain/BluePyOpt>`_, or the `Github discussion page <https://github.com/BlueBrain/BluePyOpt/discussions>`_.
News
====
- 2023/01: BluePyOpt now supports the Arbor simulator.
- 2022/12: Support for LFPy models merged into master. Examples and preprint: https://github.com/alejoe91/multimodalfitting, https://www.biorxiv.org/content/10.1101/2022.08.03.502468v1.full
- 2022/12: BluePyOpt now has the ability to write out NeuroML files: https://github.com/BlueBrain/BluePyOpt/tree/master/bluepyopt/neuroml
- 2021/08/30: BluePyOpt dropped Python 2.7 support.
- 2017/01/04: BluePyOpt is now considered compatible with Python 3.6+.
- 2016/11/10: BluePyOpt now supports NEURON point processes. This means we can fit parameters of Adex/GIF/Izhikevich models, and also synapse models.
- 2016/06/14: Started a wiki: https://github.com/BlueBrain/BluePyOpt/wiki
- 2016/06/07: The BluePyOpt paper was published in Frontiers in Neuroinformatics (for link, see above)
- 2016/05/03: The API documentation was moved to `ReadTheDocs <http://bluepyopt.readthedocs.io/en/latest/>`_
- 2016/04/20: BluePyOpt now contains the code of the IBEA selector, no need to install a BBP-specific version of DEAP anymore
- 2016/03/24: Released version 1.0
Requirements
============
* `Python 3.9+ <https://www.python.org/downloads/release/python-390/>`_
* `Pip <https://pip.pypa.io>`_ (installed by default in newer versions of Python)
* `Neuron 7.4+ <http://neuron.yale.edu/>`_ (compiled with Python support)
* `eFEL eFeature Extraction Library <https://github.com/BlueBrain/eFEL>`_ (automatically installed by pip)
* `Numpy <http://www.numpy.org>`_ (automatically installed by pip)
* `Pandas <http://pandas.pydata.org/>`_ (automatically installed by pip)
* The instructions below are written assuming you have access to a command shell on Linux / UNIX / MacOSX / Cygwin
Installation
============
If you want to use the ephys module of BluePyOpt, you first need to install NEURON with Python support on your machine.
And then bluepyopt itself:
.. code-block:: bash
pip install bluepyopt
Support for simulators other than NEURON is optional and not installed by default. If you want to use `Arbor <https://arbor-sim.org/>`_ to run your models, use the following line instead to install bluepyopt.
.. code-block:: bash
pip install bluepyopt[arbor]
Cloud infrastructure
====================
We provide instructions on how to set up an optimisation environment on cloud
infrastructure or cluster computers
`here <https://github.com/BlueBrain/BluePyOpt/tree/master/cloud-config>`_
Quick Start
===========
Single compartmental model
--------------------------
An iPython notebook with an introductory optimisation of a one compartmental
model with 2 HH channels can be found at
https://github.com/BlueBrain/BluePyOpt/blob/master/examples/simplecell/simplecell.ipynb (NEURON)
https://github.com/BlueBrain/BluePyOpt/blob/master/examples/simplecell/simplecell_arbor.ipynb (Arbor)
|landscape_example|
**Figure**: The solution space of a single compartmental model with two parameters: the maximal conductance of Na and K ion channels. The color represents how well the model fits two objectives: when injected with two different currents, the model has to fire 1 and 4 action potential respectively during the stimuli. Dark blue is the best fitness. The blue circles represent solutions with a perfect score.
Neocortical Layer 5 Pyramidal Cell
----------------------------------
Scripts for a more complex neocortical L5PC are in
`this directory <https://github.com/BlueBrain/BluePyOpt/tree/master/examples/l5pc>`__
With a notebook:
https://github.com/BlueBrain/BluePyOpt/blob/master/examples/l5pc/L5PC.ipynb (NEURON)
https://github.com/BlueBrain/BluePyOpt/blob/master/examples/l5pc/L5PC_arbor.ipynb (Arbor)
Thalamocortical Cells
---------------------
Scripts for 2 thalamocortical cell types are in
`this directory <https://github.com/BlueBrain/BluePyOpt/tree/master/examples/thalamocortical-cell>`__
With a notebook:
https://github.com/BlueBrain/BluePyOpt/blob/master/examples/thalamocortical-cell/thalamocortical-cell_opt.ipynb
Tsodyks-Markram Model of Short-Term Plasticity
----------------------------------------------
Scripts for 2 versions of fitting the Tsodyks-Markram model to synaptic traces are in
`this directory <https://github.com/BlueBrain/BluePyOpt/tree/master/examples/tsodyksmarkramstp>`__
With 2 notebooks:
https://github.com/BlueBrain/BluePyOpt/blob/master/examples/tsodyksmarkramstp/tsodyksmarkramstp.ipynb
https://github.com/BlueBrain/BluePyOpt/blob/master/examples/tsodyksmarkramstp/tsodyksmarkramstp_multiplefreqs.ipynb
Exporting cell in neuroml format
--------------------------------
An iPython notebook showing how to export a BluePyOpt cell in the neuroml format, how to create a LEMS simulation,
and how to run the LEMS simulation with the neuroml cell can be found at:
https://github.com/BlueBrain/BluePyOpt/blob/master/examples/neuroml/neuroml.ipynb
API documentation
=================
The API documentation can be found on `ReadTheDocs <http://bluepyopt.readthedocs.io/en/latest/>`_.
Funding
=======
This work has been partially funded by the European Union Seventh Framework Program (FP7/2007-2013) under grant agreement no. 604102 (HBP), the European Union’s Horizon 2020 Framework Programme for Research and Innovation under the Specific Grant Agreement No. 720270, 785907 (Human Brain Project SGA1/SGA2) and by the EBRAINS research infrastructure, funded from the European Union’s Horizon 2020 Framework Programme for Research and Innovation under the Specific Grant Agreement No. 945539 (Human Brain Project SGA3).
This project/research was supported by funding to the Blue Brain Project, a research center of the École polytechnique fédérale de Lausanne (EPFL), from the Swiss government’s ETH Board of the Swiss Federal Institutes of Technology.
Copyright (c) 2016-2024 Blue Brain Project/EPFL
..
The following image is also defined in the index.rst file, as the relative path is
different, depending from where it is sourced.
The following location is used for the github README
The index.rst location is used for the docs README; index.rst also defined an end-marker,
to skip content after the marker 'substitutions'.
.. |pypi| image:: https://img.shields.io/pypi/v/bluepyopt.svg
:target: https://pypi.org/project/bluepyopt/
:alt: latest release
.. |docs| image:: https://readthedocs.org/projects/bluepyopt/badge/?version=latest
:target: https://bluepyopt.readthedocs.io/
:alt: latest documentation
.. |license| image:: https://img.shields.io/pypi/l/bluepyopt.svg
:target: https://github.com/BlueBrain/bluepyopt/blob/master/LICENSE.txt
:alt: license
.. |build| image:: https://github.com/BlueBrain/BluePyOpt/workflows/Build/badge.svg?branch=master
:target: https://github.com/BlueBrain/BluePyOpt/actions
:alt: actions build status
.. |coverage| image:: https://codecov.io/github/BlueBrain/BluePyOpt/coverage.svg?branch=master
:target: https://codecov.io/gh/BlueBrain/bluepyopt
:alt: coverage
.. |gitter| image:: https://badges.gitter.im/Join%20Chat.svg
:target: https://gitter.im/BlueBrain/BluePyOpt
:alt: Join the chat at https://gitter.im/BlueBrain/BluePyOpt
.. |zenodo| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.8135890.svg
:target: https://doi.org/10.5281/zenodo.8135890
.. substitutions
.. |banner| image:: docs/source/logo/BluePyOptBanner.png
.. |landscape_example| image:: examples/simplecell/figures/landscape_example.png
================================================
FILE: bluepyopt/__init__.py
================================================
"""Init script"""
"""
Copyright (c) 2016-2022, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# pylint: disable=W0611

from importlib.metadata import version

# Single source of truth for the package version: read it from the installed
# distribution metadata instead of hard-coding it here.
__version__ = version("bluepyopt")

from . import tools # NOQA
from .api import * # NOQA

# Import the sub-packages eagerly so that `import bluepyopt` exposes them.
import bluepyopt.optimisations
import bluepyopt.deapext.algorithms
import bluepyopt.stoppingCriteria
import bluepyopt.deapext.optimisations
import bluepyopt.deapext.optimisationsCMA

# Add some backward compatibility for the time when DEAPoptimisation not in
# deapext yet
# TODO deprecate this
bluepyopt.optimisations.DEAPOptimisation = \
    bluepyopt.deapext.optimisations.DEAPOptimisation

import bluepyopt.evaluators
import bluepyopt.objectives
import bluepyopt.parameters # NOQA

# TODO let objects read / write themselves using json
# TODO create 'Variables' class
# TODO use 'locations' instead of 'location'
# TODO add island functionality to optimiser
# TODO add plotting functionality
# TODO show progress bar during optimisation
================================================
FILE: bluepyopt/api.py
================================================
"""Common API functionality"""
"""
Copyright (c) 2016-2020, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
'''
import logging
logger = logging.getLogger(__name__)
def set_verboselevel(level):
"""Set verbose level"""
logger.setLevel(level)
'''
================================================
FILE: bluepyopt/deapext/CMA_MO.py
================================================
"""Multi Objective CMA-es class"""
"""
Copyright (c) 2016-2022, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# pylint: disable=R0912, R0914
import logging
import numpy
import copy
from math import log
import deap
from deap import base
from deap import cma
from .stoppingCriteria import MaxNGen, Stagnationv2
from . import utils
from . import hype
logger = logging.getLogger("__main__")
def get_hyped(pop, ubound_score=250., threshold_improvement=240.):
    """Compute the hypervolume contribution of each individual.

    The fitness space is first bounded and every objective dimension that
    shows no improvement is ignored.

    Args:
        pop (list): individuals whose ``fitness.values`` are the objectives
        ubound_score (float): cap applied to every objective value
        threshold_improvement (float): a dimension whose best (lowest) value
            is at or above this threshold is dropped from the computation

    Returns:
        per-individual hypervolume contributions, or a list of zeros when no
        dimension is left after filtering
    """
    # Cap every objective at ubound_score
    scores = numpy.minimum(
        numpy.array([ind.fitness.values for ind in pop]), ubound_score
    )

    lbounds = scores.min(axis=0)
    ubounds = scores.max(axis=0)

    # Drop the dimensions that do not show any improvement
    stale_dims = [
        dim for dim, best in enumerate(lbounds)
        if best >= threshold_improvement
    ]
    scores = numpy.delete(scores, stale_dims, axis=1)
    lbounds = numpy.delete(lbounds, stale_dims)
    ubounds = numpy.delete(ubounds, stale_dims)

    if len(lbounds) == 0:
        logger.warning("No dimension along which to compute the hypervolume.")
        return [0.] * len(pop)

    # Rescale the objective space
    # Note: 2 here is a magic number used to make the hypercube larger than it
    # really is. It makes sure that the individual always have a non-zero
    # hyper-volume contribution and improves the results while avoiding an
    # edge case.
    scores = (scores - lbounds) / numpy.max(ubounds.flatten())
    reference = numpy.max(scores, axis=0) + 2.0

    return hype.hypeIndicatorSampled(
        points=scores, bounds=reference, k=5, nrOfSamples=1000000
    )
class CMA_MO(cma.StrategyMultiObjective):
    """Multiple objective covariance matrix adaption"""

    def __init__(
        self,
        centroids,
        offspring_size,
        sigma,
        max_ngen,
        IndCreator,
        RandIndCreator,
        weight_hv=0.5,
        map_function=None,
        use_scoop=False,
        use_stagnation_criterion=True,
    ):
        """Constructor

        Args:
            centroids (list): initial guesses used as the starting points of
                the CMA-ES
            offspring_size (int): number of offspring individuals in each
                generation
            sigma (float): initial standard deviation of the distribution
            max_ngen (int): total number of generation to run
            IndCreator (fcn): function returning an individual of the pop
            RandIndCreator (fcn): function creating a random individual.
            weight_hv (float): between 0 and 1. Weight given to the
                hypervolume contribution when computing the score of an
                individual in MO-CMA. The weight of the fitness contribution
                is computed as 1 - weight_hv.
            map_function (map): function used to map (parallelize) the
                evaluation function calls
            use_scoop (bool): use scoop map for parallel computation
            use_stagnation_criterion (bool): whether to use the stagnation
                stopping criterion on top of the maximum generation criterion
        """
        # Default population size follows the usual CMA-ES heuristic:
        # 4 + 3 * ln(problem size)
        if offspring_size is None:
            lambda_ = int(4 + 3 * log(len(RandIndCreator())))
        else:
            lambda_ = offspring_size

        if centroids is None:
            starters = [RandIndCreator() for i in range(lambda_)]
        else:
            if len(centroids) != lambda_:
                # Cycle over the provided centroids until lambda_ starting
                # points are available
                from itertools import cycle

                generator = cycle(centroids)
                starters = [
                    copy.deepcopy(next(generator)) for i in range(lambda_)
                ]
            else:
                starters = centroids

        cma.StrategyMultiObjective.__init__(
            self, starters, sigma, mu=int(lambda_ * 0.5), lambda_=lambda_
        )

        self.population = []
        self.problem_size = len(starters[0])
        self.weight_hv = weight_hv

        self.map_function = map_function
        self.use_scoop = use_scoop

        # Toolbox specific to this CMA-ES
        self.toolbox = base.Toolbox()
        self.toolbox.register("generate", self.generate, IndCreator)
        self.toolbox.register("update", self.update)

        if self.use_scoop:
            if self.map_function:
                raise Exception(
                    "Impossible to use scoop and provide self defined map "
                    "function: %s" % self.map_function
                )
            from scoop import futures

            self.map_function = futures.map

        # Set termination conditions
        self.active = True
        if max_ngen <= 0:
            # Default generation budget when no explicit limit is given
            max_ngen = 100 + 50 * (self.problem_size + 3) ** 2 / numpy.sqrt(
                lambda_
            )

        self.stopping_conditions = [
            MaxNGen(max_ngen),
        ]

        if use_stagnation_criterion:
            self.stopping_conditions.append(
                Stagnationv2(lambda_, self.problem_size)
            )

    def _select(self, candidates):
        """Select the best candidates of the population

        Candidates are ranked by a weighted mixture of their hypervolume
        contribution rank (weight "weight_hv") and their summed-fitness rank
        (weight "1 - weight_hv"). The best self.mu candidates are chosen,
        the rest are explicitly not chosen."""
        if self.weight_hv == 0.0:
            # Pure fitness: lower summed objectives is better
            fit = [numpy.sum(ind.fitness.values) for ind in candidates]
            idx_scores = list(numpy.argsort(fit))
        elif self.weight_hv == 1.0:
            # Pure hypervolume: larger contribution is better
            hv = get_hyped(candidates)
            idx_scores = list(numpy.argsort(hv))[::-1]
        else:
            hv = get_hyped(candidates)
            idx_hv = list(numpy.argsort(hv))[::-1]
            fit = [numpy.sum(ind.fitness.values) for ind in candidates]
            idx_fit = list(numpy.argsort(fit))
            # Combine the two rankings; lower combined rank is better
            scores = []
            for i in range(len(candidates)):
                score = (self.weight_hv * idx_hv.index(i)) + (
                    (1.0 - self.weight_hv) * idx_fit.index(i)
                )
                scores.append(score)
            idx_scores = list(numpy.argsort(scores))

        chosen = [candidates[i] for i in idx_scores[: self.mu]]
        not_chosen = [candidates[i] for i in idx_scores[self.mu:]]

        return chosen, not_chosen

    def get_population(self, to_space):
        """Returns the population in the original parameter space

        Args:
            to_space (list): one scaling function per parameter, mapping the
                normalized value back to the original space
        """
        pop = copy.deepcopy(self.population)
        for i, ind in enumerate(pop):
            for j, v in enumerate(ind):
                pop[i][j] = to_space[j](v)
        return pop

    def get_parents(self, to_space):
        """Returns the parents in the original parameter space

        Args:
            to_space (list): one scaling function per parameter, mapping the
                normalized value back to the original space
        """
        pop = copy.deepcopy(self.parents)
        for i, ind in enumerate(pop):
            for j, v in enumerate(ind):
                pop[i][j] = to_space[j](v)
        return pop

    def generate_new_pop(self, lbounds, ubounds):
        """Generate a new population bounded in the normalized space"""
        self.population = self.toolbox.generate()
        return utils.bound(self.population, lbounds, ubounds)

    def update_strategy(self):
        """Update the covariance matrix strategy from the population"""
        self.toolbox.update(self.population)

    def set_fitness(self, fitnesses):
        """Assign the given fitness values to the current population"""
        for f, ind in zip(fitnesses, self.population):
            ind.fitness.values = f

    def set_fitness_parents(self, fitnesses):
        """Assign the given fitness values to the parents"""
        for f, ind in zip(fitnesses, self.parents):
            ind.fitness.values = f

    def check_termination(self, gen):
        """Check the stopping conditions and deactivate the strategy when
        one of them is met"""
        stopping_params = {
            "gen": gen,
            "population": self.population,
        }

        [c.check(stopping_params) for c in self.stopping_conditions]
        for c in self.stopping_conditions:
            if c.criteria_met:
                # Bugfix: c.name is a string; " ".join(c.name) interleaved
                # every character with spaces in the log message.
                logger.info(
                    "CMA stopped because of termination criteria: " +
                    c.name
                )
                self.active = False
================================================
FILE: bluepyopt/deapext/CMA_SO.py
================================================
"""Single Objective CMA-es class"""
"""
Copyright (c) 2016-2022, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# pylint: disable=R0912, R0914
import logging
import numpy
from math import sqrt, log
import copy
from deap import base
from deap import cma
from .stoppingCriteria import (
MaxNGen,
Stagnationv2,
TolHistFun,
EqualFunVals,
NoEffectAxis,
TolUpSigma,
TolX,
ConditionCov,
NoEffectCoor,
)
from . import utils
logger = logging.getLogger("__main__")
class CMA_SO(cma.Strategy):
    """Single objective covariance matrix adaption"""

    def __init__(
        self,
        centroids,
        offspring_size,
        sigma,
        max_ngen,
        IndCreator,
        RandIndCreator,
        map_function=None,
        use_scoop=False,
        use_stagnation_criterion=True,
    ):
        """Constructor

        Args:
            centroids (list): initial guess used as the starting point of
                the CMA-ES (only the first entry is used)
            offspring_size (int): number of offspring individuals in each
                generation
            sigma (float): initial standard deviation of the distribution
            max_ngen (int): total number of generation to run
            IndCreator (fcn): function returning an individual of the pop
            RandIndCreator (fcn): function creating a random individual.
            map_function (map): function used to map (parallelize) the
                evaluation function calls
            use_scoop (bool): use scoop map for parallel computation
            use_stagnation_criterion (bool): whether to use the stagnation
                stopping criterion on top of the maximum generation criterion
        """
        # Default population size follows the usual CMA-ES heuristic:
        # 4 + 3 * ln(problem size)
        if offspring_size is None:
            lambda_ = int(4 + 3 * log(len(RandIndCreator())))
        else:
            lambda_ = offspring_size

        if centroids is None:
            starter = RandIndCreator()
        else:
            starter = centroids[0]

        cma.Strategy.__init__(self, starter, sigma, lambda_=lambda_)

        self.population = []
        self.problem_size = len(starter)

        self.map_function = map_function
        self.use_scoop = use_scoop

        # Toolbox specific to this CMA-ES
        self.toolbox = base.Toolbox()
        self.toolbox.register("generate", self.generate, IndCreator)
        self.toolbox.register("update", self.update)

        # Set termination conditions
        self.active = True
        if max_ngen <= 0:
            # Default generation budget when no explicit limit is given
            max_ngen = 100 + 50 * (self.problem_size + 3) ** 2 / numpy.sqrt(
                lambda_
            )

        self.stopping_conditions = [
            MaxNGen(max_ngen),
            TolHistFun(lambda_, self.problem_size),
            EqualFunVals(lambda_, self.problem_size),
            NoEffectAxis(self.problem_size),
            TolUpSigma(float(self.sigma)),
            TolX(),
            ConditionCov(),
            NoEffectCoor(),
        ]

        if use_stagnation_criterion:
            self.stopping_conditions.append(
                Stagnationv2(lambda_, self.problem_size)
            )

    def update(self, population):
        """Update the current covariance matrix strategy from the
        population"""
        # Best individuals first (weighted_reduce, descending)
        population.sort(key=lambda ind: ind.fitness.weighted_reduce,
                        reverse=True)

        old_centroid = self.centroid
        # New centroid: recombination of the mu best individuals
        self.centroid = numpy.dot(self.weights, population[0:self.mu])

        c_diff = self.centroid - old_centroid

        # Cumulation : update evolution path
        self.ps = (1 - self.cs) * self.ps + sqrt(
            self.cs * (2 - self.cs) * self.mueff
        ) / self.sigma * numpy.dot(
            self.B, (1.0 / self.diagD) * numpy.dot(self.B.T, c_diff)
        )  # noqa

        # Heaviside function used to stall the pc update when sigma grows
        # too quickly
        hsig = float(
            (
                numpy.linalg.norm(self.ps)
                / sqrt(1.0 - (1.0 - self.cs) **
                       (2.0 * (self.update_count + 1.0)))
                / self.chiN
                < (1.4 + 2.0 / (self.dim + 1.0))
            )
        )  # noqa

        self.update_count += 1

        self.pc = (1 - self.cc) * self.pc + hsig * sqrt(
            self.cc * (2 - self.cc) * self.mueff
        ) / self.sigma * c_diff

        # Update covariance matrix (rank-one + rank-mu update)
        artmp = population[0:self.mu] - old_centroid
        self.C = (
            (
                1
                - self.ccov1
                - self.ccovmu
                + (1 - hsig) * self.ccov1 * self.cc * (2 - self.cc)
            )
            * self.C
            + self.ccov1 * numpy.outer(self.pc, self.pc)
            + self.ccovmu * numpy.dot((self.weights * artmp.T), artmp)
            / self.sigma ** 2
        )

        # Step-size update
        self.sigma *= numpy.exp(
            (numpy.linalg.norm(self.ps) / self.chiN - 1.0) * self.cs
            / self.damps
        )

        # Eigendecomposition of C to refresh B, diagD and the condition
        # number
        self.diagD, self.B = numpy.linalg.eigh(self.C)
        indx = numpy.argsort(self.diagD)

        self.cond = self.diagD[indx[-1]] / self.diagD[indx[0]]

        self.diagD = self.diagD[indx] ** 0.5
        self.B = self.B[:, indx]
        self.BD = self.B * self.diagD

    def get_population(self, to_space):
        """Returns the population in the original parameter space

        Args:
            to_space (list): one scaling function per parameter, mapping the
                normalized value back to the original space
        """
        pop = copy.deepcopy(self.population)
        for i, ind in enumerate(pop):
            for j, v in enumerate(ind):
                pop[i][j] = to_space[j](v)
        return pop

    def generate_new_pop(self, lbounds, ubounds):
        """Generate a new population bounded in the normalized space"""
        self.population = self.toolbox.generate()
        return utils.bound(self.population, lbounds, ubounds)

    def update_strategy(self):
        """Update the covariance matrix strategy from the population"""
        self.toolbox.update(self.population)

    def set_fitness(self, fitnesses):
        """Assign the given fitness values to the current population"""
        for f, ind in zip(fitnesses, self.population):
            ind.fitness.values = f

    def check_termination(self, gen):
        """Check the stopping conditions and deactivate the strategy when
        one of them is met"""
        stopping_params = {
            "gen": gen,
            "population": self.population,
            "centroid": self.centroid,
            "pc": self.pc,
            "C": self.C,
            "B": self.B,
            "sigma": self.sigma,
            "diagD": self.diagD,
            "cond": self.cond,
        }

        [c.check(stopping_params) for c in self.stopping_conditions]
        for c in self.stopping_conditions:
            if c.criteria_met:
                # Bugfix: c.name is a string; " ".join(c.name) interleaved
                # every character with spaces in the log message.
                logger.info(
                    "CMA stopped because of termination criteria: " +
                    c.name
                )
                self.active = False
================================================
FILE: bluepyopt/deapext/__init__.py
================================================
"""Init script"""
================================================
FILE: bluepyopt/deapext/algorithms.py
================================================
"""Optimisation class"""
"""
Copyright (c) 2016-2022, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# pylint: disable=R0914, R0912
import random
import logging
import shutil
import os
import time
import deap.algorithms
import deap.tools
import pickle
from .stoppingCriteria import MaxNGen
from . import utils
logger = logging.getLogger('__main__')
def _define_fitness(pop, obj_size):
    '''Re-instanciate the fitness of the individuals for it to matches the
    evaluation function.

    Args:
        pop (list): individuals whose parameter values are copied over
        obj_size (int): number of objectives of the new fitness

    Returns:
        list: new WSListIndividual instances ([] when pop is empty)
    '''
    from .optimisations import WSListIndividual

    if not pop:
        return []
    return [WSListIndividual(list(ind), obj_size=obj_size) for ind in pop]
def _evaluate_invalid_fitness(toolbox, population):
'''Evaluate the individuals with an invalid fitness
Returns the count of individuals with invalid fitness
'''
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
return len(invalid_ind)
def _get_offspring(parents, toolbox, cxpb, mutpb):
'''return the offspring, use toolbox.variate if possible'''
if hasattr(toolbox, 'variate'):
return toolbox.variate(parents, toolbox, cxpb, mutpb)
return deap.algorithms.varAnd(parents, toolbox, cxpb, mutpb)
def _check_stopping_criteria(criteria, params):
for c in criteria:
c.check(params)
if c.criteria_met:
logger.info('Run stopped because of stopping criteria: ' +
c.name)
return True
else:
return False
def eaAlphaMuPlusLambdaCheckpoint(
        population,
        toolbox,
        mu,
        cxpb,
        mutpb,
        ngen,
        stats=None,
        halloffame=None,
        cp_frequency=1,
        cp_period=None,
        cp_filename=None,
        continue_cp=False,
        terminator=None,
        param_names=None):
    r"""This is the :math:`(~\alpha,\mu~,~\lambda)` evolutionary algorithm

    Args:
        population(list of deap Individuals)
        toolbox(deap Toolbox)
        mu(int): Total parent population size of EA
        cxpb(float): Crossover probability
        mutpb(float): Mutation probability
        ngen(int): Total number of generation to run
        stats(deap.tools.Statistics): generation of statistics
        halloffame(deap.tools.HallOfFame): hall of fame
        cp_frequency(int): generations between checkpoints
        cp_period(float): minimum time (in s) between checkpoint.
            None to save checkpoint independently of the time between them
        cp_filename(string): path to checkpoint filename
        continue_cp(bool): whether to continue
        terminator (multiprocessing.Event): exit loop when is set.
            Not taken into account if None.
        param_names(list): names of the parameters optimized by the evaluator

    Returns:
        tuple: (population, halloffame, logbook, history)
    """
    if param_names is None:
        param_names = []

    if cp_filename:
        # Checkpoints are first written to a temp file and then copied over,
        # so an interrupted write cannot corrupt the previous checkpoint.
        cp_filename_tmp = cp_filename + '.tmp'

    if continue_cp:
        # A file name has been given, then load the data from the file
        cp = pickle.load(open(cp_filename, "rb"))
        population = cp["population"]
        parents = cp["parents"]
        start_gen = cp["generation"]
        halloffame = cp["halloffame"]
        logbook = cp["logbook"]
        history = cp["history"]
        # Restore the RNG state so the run continues deterministically
        random.setstate(cp["rndstate"])

        # Assert that the fitness of the individuals match the evaluator
        obj_size = len(population[0].fitness.wvalues)
        population = _define_fitness(population, obj_size)
        parents = _define_fitness(parents, obj_size)
        _evaluate_invalid_fitness(toolbox, parents)
        _evaluate_invalid_fitness(toolbox, population)
    else:
        # Start a new evolution
        start_gen = 1
        parents = population[:]

        logbook = deap.tools.Logbook()
        logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
        history = deap.tools.History()

        invalid_count = _evaluate_invalid_fitness(toolbox, population)
        utils.update_history_and_hof(halloffame, history, population)
        utils.record_stats(
            stats, logbook, start_gen, population, invalid_count
        )

    stopping_criteria = [MaxNGen(ngen)]

    # Begin the generational process
    gen = start_gen + 1
    stopping_params = {"gen": gen}
    time_last_save = time.time()
    while utils.run_next_gen(
            not (_check_stopping_criteria(stopping_criteria, stopping_params)),
            terminator):
        offspring = _get_offspring(parents, toolbox, cxpb, mutpb)

        # (mu + lambda) scheme: parents compete with their offspring
        population = parents + offspring

        invalid_count = _evaluate_invalid_fitness(toolbox, offspring)
        utils.update_history_and_hof(halloffame, history, population)
        utils.record_stats(stats, logbook, gen, population, invalid_count)

        # Select the next generation parents
        parents = toolbox.select(population, mu)

        logger.info(logbook.stream)

        # Save a checkpoint every cp_frequency generations, but not more
        # often than cp_period seconds apart (when cp_period is set)
        if (cp_filename and cp_frequency and
                gen % cp_frequency == 0 and
                (cp_period is None or time.time() - time_last_save > cp_period)):
            cp = dict(population=population,
                      generation=gen,
                      parents=parents,
                      halloffame=halloffame,
                      history=history,
                      logbook=logbook,
                      rndstate=random.getstate(),
                      param_names=param_names)
            pickle.dump(cp, open(cp_filename_tmp, "wb"))
            if os.path.isfile(cp_filename_tmp):
                shutil.copy(cp_filename_tmp, cp_filename)
                logger.debug('Wrote checkpoint to %s', cp_filename)
            time_last_save = time.time()

        gen += 1
        stopping_params["gen"] = gen

    return population, halloffame, logbook, history
================================================
FILE: bluepyopt/deapext/hype.py
================================================
"""
Copyright (c) 2016-2022, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import numpy
def hypesub(la, A, actDim, bounds, pvec, alpha, k):
    """HypE algorithm sub function.

    Recursively slices the objective space along the currently active
    dimension and accumulates each individual's weighted share of the
    dominated hyper-volume.

    Args:
        la (int): total population size (length of the returned vector)
        A (array): 2D array of objective values for the current subset
        actDim (int): 1-based index of the dimension being sliced
        bounds (array): reference point bounding the hyper-volume
        pvec (array): indices into the full population for the rows of A
        alpha (array): HypE weighting coefficients, indexed by how many
            points share a slab
        k (int): HypE parameter limiting how many sharers are credited

    Returns:
        array: per-individual hyper-volume contribution (length la)
    """
    h = numpy.zeros(la)
    # Sort the points by their coordinate in the active dimension
    i = numpy.argsort(A[:, actDim - 1])
    S = A[i]
    pvec = pvec[i]
    # Walk the sorted points slab by slab (note: reuses the name i)
    for i in range(1, S.shape[0] + 1):
        # Width of the slab between consecutive points, or up to the
        # reference bound for the last point
        if i < S.shape[0]:
            extrusion = S[i, actDim - 1] - S[i - 1, actDim - 1]
        else:
            extrusion = bounds[actDim - 1] - S[i - 1, actDim - 1]
        if actDim == 1:
            # Base case: the slab is shared by the first i points; credit
            # it with the weight for i sharers (only up to k sharers count)
            if i > k:
                break
            if alpha[i - 1] >= 0:
                h[pvec[0:i]] += extrusion * alpha[i - 1]
        elif extrusion > 0.0:
            # Recurse on the remaining dimensions with the first i points
            h += extrusion * hypesub(
                la, S[0:i, :], actDim - 1, bounds, pvec[0:i], alpha, k
            )
    return h
def hypeIndicatorExact(points, bounds, k):
    """HypE algorithm. Python implementation of the Matlab code available at
    https://sop.tik.ee.ethz.ch/download/supplementary/hype/

    Args:
        points(array): 2D array containing the objective values of the
            population
        bounds(array): 1D array containing the reference point from which to
            compute the hyper-volume
        k(int): HypE parameter

    Returns:
        array: per-individual hyper-volume contribution
    """
    pop_size = points.shape[0]
    # A negative k means "use the whole population"
    if k < 0:
        k = pop_size
    n_objectives = points.shape[1]
    indices = numpy.arange(pop_size)
    # HypE weighting coefficient for each possible number of sharers 1..k
    coefficients = numpy.asarray([
        numpy.prod(
            (k - numpy.arange(1, rank)) / (pop_size - numpy.arange(1, rank))
            / rank
        )
        for rank in range(1, k + 1)
    ])
    return hypesub(
        pop_size, points, n_objectives, bounds, indices, coefficients, k
    )
def hypeIndicatorSampled(points, bounds, k, nrOfSamples):
"""Monte-Carlo approximation of the HypE algorithm. Python implementation
of the Matlab code available at
https://sop.tik.ee.ethz.ch/download/supplementary/hype/
Args:
points(array): 2D array containing the objective values of the
population
bounds(array): 1D array containing the reference point from which to
compute the hyper-volume
k(int): HypE parameter
nrOfSamples(int): number of random samples to use for the
Monte-Carlo approximation
"""
nrP = points.shape[0]
dim = points.shape[1]
F = numpy.zeros(nrP)
BoxL = numpy.min(points, axis=0)
alpha = []
for i in range(1, k + 1):
j = numpy.arange(1, i)
alpha.append(numpy.prod((k - j) / (nrP - j) / i))
alpha = numpy.asarray(alpha + [0.0] * nrP)
S = numpy.random.uniform(low=BoxL, high=bounds, size=(nrOfSamples, dim))
dominated = numpy.zeros(nrOfSamples, dtype="uint")
for j in range(1, nrP + 1):
B = S - points[j - 1]
ind = numpy.sum(B >= 0, axis=1) == dim
dominated[ind] += 1
for j in range(1, nrP + 1):
B = S - points[j - 1]
ind = numpy.sum(B >= 0, axis=1) == dim
x = dominated[ind]
F[j - 1] = numpy.sum(alpha[x - 1])
F = F * numpy.prod(bounds - BoxL) / nrOfSamples
return F
================================================
FILE: bluepyopt/deapext/optimisations.py
================================================
"""Optimisation class"""
"""
Copyright (c) 2016-2022, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# pylint: disable=R0912, R0914
import random
import logging
import functools
import deap
import deap.base
import deap.algorithms
import deap.tools
from . import algorithms
from . import tools
from . import utils
import bluepyopt.optimisations
logger = logging.getLogger('__main__')
# TODO decide which variables go in constructor,which ones go in 'run' function
# TODO abstract the algorithm by creating a class for every algorithm, that way
# settings of the algorithm can be stored in objects of these classes
class WeightedSumFitness(deap.base.Fitness):
    """Fitness that compares individuals by the weighted sum of objectives"""

    def __init__(self, values=(), obj_size=None):
        """Constructor

        Args:
            values (tuple): initial objective values
            obj_size (int): number of objectives; every objective gets a
                weight of -1.0 (minimisation)
        """
        if obj_size is None:
            self.weights = [-1]
        else:
            self.weights = [-1.0] * obj_size
        super(WeightedSumFitness, self).__init__(values)

    @property
    def weighted_sum(self):
        """Weighted sum of wvalues"""
        return sum(self.wvalues)

    @property
    def sum(self):
        """Weighted sum of values"""
        return sum(self.values)

    def __le__(self, other):
        """Order fitnesses by their weighted sums"""
        return self.weighted_sum <= other.weighted_sum

    def __lt__(self, other):
        """Order fitnesses by their weighted sums"""
        return self.weighted_sum < other.weighted_sum

    def __deepcopy__(self, _):
        """Override deepcopy: copy the instance dict into a fresh object
        instead of recursing into it"""
        duplicate = self.__class__.__new__(self.__class__)
        duplicate.__dict__.update(self.__dict__)
        return duplicate
class WSListIndividual(list):
    """Individual consisting of list with weighted sum field"""

    def __init__(self, *args, **kwargs):
        """Constructor

        Args:
            *args: forwarded to the list constructor (initial parameters)
            obj_size (int): required keyword, number of objectives of the
                attached WeightedSumFitness

        Raises:
            KeyError: if obj_size is not provided (same as before)
        """
        # pop() retrieves and removes the keyword in one idiomatic step,
        # replacing the previous subscript-then-del pair; a missing
        # obj_size still raises KeyError exactly as before
        self.fitness = WeightedSumFitness(obj_size=kwargs.pop('obj_size'))
        super(WSListIndividual, self).__init__(*args, **kwargs)
class DEAPOptimisation(bluepyopt.optimisations.Optimisation):
    """DEAP Optimisation class"""

    def __init__(self, evaluator=None,
                 use_scoop=False,
                 seed=1,
                 offspring_size=10,
                 eta=10,
                 mutpb=1.0,
                 cxpb=1.0,
                 map_function=None,
                 hof=None,
                 selector_name=None):
        """Constructor

        Args:
            evaluator (Evaluator): Evaluator object
            use_scoop (bool): use scoop map for parallel computation
            seed (float): Random number generator seed
            offspring_size (int): Number of offspring individuals in each
                generation
            eta (float): Parameter that controls how far the crossover and
                mutation operator disturbe the original individuals
            mutpb (float): Mutation probability
            cxpb (float): Crossover probability
            map_function (function): Function used to map (parallelise) the
                evaluation function calls
            hof (hof): Hall of Fame object
            selector_name (str): The selector used in the evolutionary
                algorithm, possible values are 'IBEA' or 'NSGA2'
        """
        super(DEAPOptimisation, self).__init__(evaluator=evaluator)
        self.use_scoop = use_scoop
        self.seed = seed
        self.offspring_size = offspring_size
        self.eta = eta
        self.cxpb = cxpb
        self.mutpb = mutpb
        self.map_function = map_function
        # Default to IBEA selection when no selector was requested
        self.selector_name = selector_name
        if self.selector_name is None:
            self.selector_name = 'IBEA'
        # Default to a Hall of Fame keeping the 10 best individuals
        self.hof = hof
        if self.hof is None:
            self.hof = deap.tools.HallOfFame(10)
        # Create a DEAP toolbox
        self.toolbox = deap.base.Toolbox()
        self.setup_deap()

    def setup_deap(self):
        """Set up optimisation: register individuals, population format,
        operators and the evaluation/selection machinery on the toolbox"""
        # Number of objectives
        OBJ_SIZE = len(self.evaluator.objectives)
        # Set random seed
        random.seed(self.seed)
        # Eta parameter of crossover / mutation parameters
        # Basically defines how much they 'spread' solution around
        # The lower this value, the more spread
        ETA = self.eta
        # Number of parameters
        IND_SIZE = len(self.evaluator.params)
        if IND_SIZE == 0:
            raise ValueError(
                "Length of evaluator.params is zero. At least one "
                "non-fix parameter is needed to run an optimization."
            )
        # Bounds for the parameters
        LOWER = []
        UPPER = []
        for parameter in self.evaluator.params:
            LOWER.append(parameter.lower_bound)
            UPPER.append(parameter.upper_bound)
        # Register the 'uniform' function
        self.toolbox.register("uniformparams", utils.uniform, LOWER, UPPER,
                              IND_SIZE)
        # Register the individual format
        # An individual is created by WSListIndividual and its parameters
        # are initially picked by 'uniform'
        self.toolbox.register(
            "Individual",
            deap.tools.initIterate,
            functools.partial(WSListIndividual, obj_size=OBJ_SIZE),
            self.toolbox.uniformparams)
        # Register the population format. It is a list of individuals
        self.toolbox.register(
            "population",
            deap.tools.initRepeat,
            list,
            self.toolbox.Individual)
        # Register the evaluation function for the individuals
        self.toolbox.register(
            "evaluate",
            self.evaluator.init_simulator_and_evaluate_with_lists
        )
        # Register the mate operator
        self.toolbox.register(
            "mate",
            deap.tools.cxSimulatedBinaryBounded,
            eta=ETA,
            low=LOWER,
            up=UPPER)
        # Register the mutation operator
        self.toolbox.register(
            "mutate",
            deap.tools.mutPolynomialBounded,
            eta=ETA,
            low=LOWER,
            up=UPPER,
            indpb=0.5)
        # Register the variate operator
        self.toolbox.register("variate", deap.algorithms.varAnd)
        # Register the selector (picks parents from population)
        if self.selector_name == 'IBEA':
            self.toolbox.register("select", tools.selIBEA)
        elif self.selector_name == 'NSGA2':
            self.toolbox.register("select", deap.tools.emo.selNSGA2)
        else:
            raise ValueError('DEAPOptimisation: Constructor selector_name '
                             'argument only accepts "IBEA" or "NSGA2"')
        # Make bound methods picklable so they survive process-based maps
        import copyreg
        import types
        copyreg.pickle(types.MethodType, utils.reduce_method)
        if self.use_scoop:
            if self.map_function:
                # Bugfix: reworded the previously garbled error message
                raise Exception(
                    'Impossible to use scoop while providing a '
                    'self-defined map function: %s' %
                    self.map_function)
            from scoop import futures
            self.toolbox.register("map", futures.map)
        elif self.map_function:
            self.toolbox.register("map", self.map_function)

    def run(self,
            max_ngen=10,
            offspring_size=None,
            continue_cp=False,
            cp_filename=None,
            cp_frequency=1,
            cp_period=None,
            parent_population=None,
            terminator=None):
        """Run optimisation

        Args:
            max_ngen (int): maximum number of generations
            offspring_size (int): overrides the constructor's
                offspring_size when given
            continue_cp (bool): whether to continue from the checkpoint file
            cp_filename (str): path to the checkpoint file
            cp_frequency (int): generations between checkpoints
            cp_period (float): minimum time (in s) between checkpoints;
                None saves checkpoints regardless of elapsed time
            parent_population (list): lists of parameter values used as the
                starting population
            terminator (multiprocessing.Event): exit the loop when set

        Returns:
            tuple: (final population, hall of fame, logbook, history)
        """
        # Allow run function to override offspring_size
        # TODO probably in the future this should not be an object field
        # anymore
        # keeping for backward compatibility
        if offspring_size is None:
            offspring_size = self.offspring_size
        # Generate the population object
        if parent_population is not None:
            if len(parent_population) != offspring_size:
                offspring_size = len(parent_population)
                self.offspring_size = len(parent_population)
                logger.warning(
                    'The length of the provided population is different from '
                    'the offspring_size. The offspring_size will be '
                    'overwritten.'
                )
            OBJ_SIZE = len(self.evaluator.objectives)
            IND_SIZE = len(self.evaluator.params)
            pop = []
            for ind in parent_population:
                if len(ind) != IND_SIZE:
                    raise Exception(
                        'The length of the provided individual is not equal '
                        'to the number of parameter in the evaluator ')
                pop.append(WSListIndividual(ind, obj_size=OBJ_SIZE))
        else:
            pop = self.toolbox.population(n=offspring_size)
        # Statistics are computed on the sum of the objective values
        stats = deap.tools.Statistics(key=lambda ind: ind.fitness.sum)
        import numpy
        stats.register("avg", numpy.mean)
        stats.register("std", numpy.std)
        stats.register("min", numpy.min)
        stats.register("max", numpy.max)
        param_names = []
        if hasattr(self.evaluator, "param_names"):
            param_names = self.evaluator.param_names
        pop, hof, log, history = algorithms.eaAlphaMuPlusLambdaCheckpoint(
            pop,
            self.toolbox,
            offspring_size,
            self.cxpb,
            self.mutpb,
            max_ngen,
            stats=stats,
            halloffame=self.hof,
            cp_frequency=cp_frequency,
            # Bugfix: forward the caller's cp_period. It was hardcoded to
            # None, which silently disabled the time-based checkpoint
            # throttle this method's signature promises.
            cp_period=cp_period,
            continue_cp=continue_cp,
            cp_filename=cp_filename,
            terminator=terminator,
            param_names=param_names)
        # Update hall of fame
        self.hof = hof
        return pop, self.hof, log, history
class IBEADEAPOptimisation(DEAPOptimisation):
    """IBEA DEAP class

    Adds no behaviour of its own: DEAPOptimisation already defaults
    selector_name to 'IBEA'. Presumably kept so existing callers using this
    name keep working.
    """
    def __init__(self, *args, **kwargs):
        """Constructor

        Forwards all arguments unchanged to DEAPOptimisation.
        """
        super(IBEADEAPOptimisation, self).__init__(*args, **kwargs)
================================================
FILE: bluepyopt/deapext/optimisationsCMA.py
================================================
"""CMA Optimisation class"""
"""
Copyright (c) 2016-2022, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import logging
import numpy
import pickle
import random
import functools
import shutil
import os
import time
import deap.tools
from .CMA_SO import CMA_SO
from .CMA_MO import CMA_MO
from . import utils
import bluepyopt.optimisations
logger = logging.getLogger("__main__")
def _ind_convert_space(ind, convert_fcn):
"""util function to pass the individual from normalized to real space and
inversely"""
return [f(x) for f, x in zip(convert_fcn, ind)]
class DEAPOptimisationCMA(bluepyopt.optimisations.Optimisation):
    """Optimisation class for CMA-based evolution strategies"""
    def __init__(
        self,
        evaluator=None,
        use_scoop=False,
        seed=1,
        offspring_size=None,
        centroids=None,
        sigma=0.4,
        map_function=None,
        hof=None,
        selector_name="single_objective",
        weight_hv=0.5,
        fitness_reduce=numpy.sum,
        use_stagnation_criterion=True,
    ):
        """Constructor

        Args:
            evaluator (Evaluator): Evaluator object
            use_scoop (bool): use scoop map for parallel computation
            seed (float): Random number generator seed
            offspring_size (int): Number of offspring individuals in each
                generation
            centroids (list): list of initial guesses used as the starting
                points of the CMA-ES
            sigma (float): initial standard deviation of the distribution
            map_function (function): Function used to map (parallelize) the
                evaluation function calls
            hof (hof): Hall of Fame object
            selector_name (str): The selector used in the evolutionary
                algorithm, possible values are 'single_objective' or
                'multi_objective'
            weight_hv (float): between 0 and 1. Weight given to the
                hyper-volume contribution when computing the score of an
                individual in MO-CMA. The weight of the fitness contribution
                is computed as 1 - weight_hv.
            fitness_reduce (fcn): function used to reduce the objective values
                to a single fitness score
            use_stagnation_criterion (bool): whether to use the stagnation
                stopping criterion on top of the maximum generation criterion
        """
        super(DEAPOptimisationCMA, self).__init__(evaluator=evaluator)
        self.use_scoop = use_scoop
        self.seed = seed
        self.map_function = map_function
        # Default to a Hall of Fame keeping the 10 best individuals
        self.hof = hof
        if self.hof is None:
            self.hof = deap.tools.HallOfFame(10)
        self.offspring_size = offspring_size
        self.fitness_reduce = fitness_reduce
        self.centroids = centroids
        self.sigma = sigma
        if weight_hv > 1.0 or weight_hv < 0.0:
            raise Exception("weight_hv has to be between 0 and 1.")
        self.weight_hv = weight_hv
        # Pick the CMA strategy class matching the requested selector
        self.selector_name = selector_name
        if self.selector_name == "single_objective":
            self.cma_creator = CMA_SO
        elif self.selector_name == "multi_objective":
            self.cma_creator = CMA_MO
        else:
            raise Exception(
                "The selector_name has to be 'single_objective' "
                "or 'multi_objective'. Not "
                "{}".format(self.selector_name)
            )
        self.use_stagnation_criterion = use_stagnation_criterion
        # Number of parameters (dimension of the search space).
        # NOTE(review): the previous comment called this the "number of
        # objective values", but the code reads evaluator.params.
        self.problem_size = len(self.evaluator.params)
        # Number of objectives.
        # NOTE(review): ind_size is len(objectives) yet is also passed as the
        # size argument of utils.uniform in setup_deap — presumably
        # utils.uniform derives the actual length from the bounds lists;
        # confirm.
        self.ind_size = len(self.evaluator.objectives)
        # Create a DEAP toolbox
        self.toolbox = deap.base.Toolbox()
        # Bounds for the parameters
        self.lbounds = [p.lower_bound for p in self.evaluator.params]
        self.ubounds = [p.upper_bound for p in self.evaluator.params]
        # Instantiate functions converting individuals from the original
        # parameter space to (and from) a normalized space bounded to [-1.;1]
        self.ubounds = numpy.asarray(self.ubounds)
        self.lbounds = numpy.asarray(self.lbounds)
        bounds_radius = (self.ubounds - self.lbounds) / 2.0
        bounds_mean = (self.ubounds + self.lbounds) / 2.0
        self.to_norm = []
        self.to_space = []
        # functools.partial pins each parameter's mean/radius so the lambdas
        # do not close over the loop variables
        for r, m in zip(bounds_radius, bounds_mean):
            self.to_norm.append(
                functools.partial(
                    lambda param, bm, br: (param - bm) / br,
                    bm=m,
                    br=r)
            )
            self.to_space.append(
                functools.partial(
                    lambda param, bm, br: (param * br) + bm,
                    bm=m,
                    br=r
                )
            )
        # Overwrite the bounds with -1. and 1.
        self.lbounds = numpy.full(self.problem_size, -1.0)
        self.ubounds = numpy.full(self.problem_size, 1.0)
        self.setup_deap()
        # In case initial guesses were provided, rescale them to the norm space
        if self.centroids is not None:
            self.centroids = [
                self.toolbox.Individual(_ind_convert_space(ind, self.to_norm))
                for ind in centroids
            ]
    def setup_deap(self):
        """Set up optimisation: seed the RNGs and register the individual,
        population and evaluation machinery on the DEAP toolbox"""
        # Set random seed
        random.seed(self.seed)
        numpy.random.seed(self.seed)
        # Register the 'uniform' function
        self.toolbox.register(
            "uniformparams",
            utils.uniform,
            self.lbounds,
            self.ubounds,
            self.ind_size
        )
        # Register the individual format
        self.toolbox.register(
            "Individual",
            functools.partial(
                utils.WSListIndividual,
                obj_size=self.ind_size,
                reduce_fcn=self.fitness_reduce,
            ),
        )
        # A Random Individual is created by ListIndividual and parameters are
        # initially picked by 'uniform'
        self.toolbox.register(
            "RandomInd",
            deap.tools.initIterate,
            self.toolbox.Individual,
            self.toolbox.uniformparams,
        )
        # Register the population format. It is a list of individuals
        self.toolbox.register(
            "population", deap.tools.initRepeat, list, self.toolbox.RandomInd
        )
        # Register the evaluation function for the individuals
        self.toolbox.register(
            "evaluate",
            self.evaluator.init_simulator_and_evaluate_with_lists
        )
        # Make bound methods picklable so they survive process-based maps
        import copyreg
        import types
        copyreg.pickle(types.MethodType, utils.reduce_method)
        if self.use_scoop:
            if self.map_function:
                raise Exception(
                    "Impossible to use scoop is providing self defined map "
                    "function: %s" % self.map_function
                )
            from scoop import futures
            self.toolbox.register("map", futures.map)
        elif self.map_function:
            self.toolbox.register("map", self.map_function)
    def run(
        self,
        max_ngen=0,
        cp_frequency=1,
        cp_period=None,
        continue_cp=False,
        cp_filename=None,
        terminator=None,
    ):
        """ Run the optimizer until a stopping criteria is met.

        Args:
            max_ngen(int): Total number of generation to run
            cp_frequency(int): generations between checkpoints
            cp_period(float): minimum time (in s) between checkpoint.
                None to save checkpoint independently of the time between them
            continue_cp(bool): whether to continue
            cp_filename(string): path to checkpoint filename
            terminator (multiprocessing.Event): exit loop when is set.
                Not taken into account if None.

        Returns:
            tuple: (final population, hall of fame, logbook, history)
        """
        if cp_filename:
            # Dump to a temp file first so an interrupted write cannot
            # corrupt the previous checkpoint
            cp_filename_tmp = cp_filename + '.tmp'
        stats = self.get_stats()
        if continue_cp:
            # A file name has been given, then load the data from the file
            cp = pickle.load(open(cp_filename, "rb"))
            gen = cp["generation"]
            self.hof = cp["halloffame"]
            logbook = cp["logbook"]
            history = cp["history"]
            random.setstate(cp["rndstate"])
            numpy.random.set_state(cp["np_rndstate"])
            CMA_es = cp["CMA_es"]
            # The map function is stripped before pickling; restore it
            CMA_es.map_function = self.map_function
            # NOTE(review): `pop` is not initialised on this branch until the
            # first loop iteration; if the restored strategy is already
            # inactive, the final `return pop` would raise NameError — confirm
        else:
            history = deap.tools.History()
            logbook = deap.tools.Logbook()
            logbook.header = ["gen", "nevals"] + stats.fields
            # Instantiate the CMA strategy centered on the centroids
            CMA_es = self.cma_creator(
                centroids=self.centroids,
                offspring_size=self.offspring_size,
                sigma=self.sigma,
                max_ngen=max_ngen,
                IndCreator=self.toolbox.Individual,
                RandIndCreator=self.toolbox.RandomInd,
                map_function=self.map_function,
                use_scoop=self.use_scoop,
                use_stagnation_criterion=self.use_stagnation_criterion,
            )
            if self.selector_name == "multi_objective":
                CMA_es.weight_hv = self.weight_hv
            # Evaluate the initial parents in the original parameter space
            to_evaluate = CMA_es.get_parents(self.to_space)
            fitness = self.toolbox.map(self.toolbox.evaluate, to_evaluate)
            fitness = list(map(list, fitness))
            CMA_es.set_fitness_parents(fitness)
            gen = 1
            pop = CMA_es.get_population(self.to_space)
        param_names = []
        if hasattr(self.evaluator, "param_names"):
            param_names = self.evaluator.param_names
        time_last_save = time.time()
        # Run until a termination criteria is met
        while utils.run_next_gen(CMA_es.active, terminator):
            logger.info("Generation {}".format(gen))
            # Generate the new populations
            n_out = CMA_es.generate_new_pop(
                lbounds=self.lbounds, ubounds=self.ubounds
            )
            logger.debug(
                "Number of individuals outside of bounds: {} ({:.2f}%)".format(
                    n_out,
                    100.0 * n_out / len(CMA_es.population)
                )
            )
            # Get all the individuals in the original space for evaluation
            to_evaluate = CMA_es.get_population(self.to_space)
            # Compute the fitness
            fitness = self.toolbox.map(self.toolbox.evaluate, to_evaluate)
            fitness = list(map(list, fitness))
            nevals = len(to_evaluate)
            CMA_es.set_fitness(fitness)
            # Update the hall of fame, history and logbook
            pop = CMA_es.get_population(self.to_space)
            utils.update_history_and_hof(self.hof, history, pop)
            record = utils.record_stats(stats, logbook, gen, pop, nevals)
            logger.info(logbook.stream)
            # Update the CMA strategy using the new fitness and check if
            # termination conditions were reached
            CMA_es.update_strategy()
            CMA_es.check_termination(gen)
            # Checkpoint when a filename was given, the generation matches
            # cp_frequency, and enough wall-clock time elapsed (cp_period)
            if (
                cp_filename and
                cp_frequency and
                gen % cp_frequency == 0 and
                (cp_period is None or time.time() - time_last_save > cp_period)
            ):
                # Map function shouldn't be pickled
                temp_mf = CMA_es.map_function
                CMA_es.map_function = None
                cp = dict(
                    population=pop,
                    generation=gen,
                    halloffame=self.hof,
                    history=history,
                    logbook=logbook,
                    rndstate=random.getstate(),
                    np_rndstate=numpy.random.get_state(),
                    CMA_es=CMA_es,
                    param_names=param_names,
                )
                pickle.dump(cp, open(cp_filename_tmp, "wb"))
                if os.path.isfile(cp_filename_tmp):
                    shutil.copy(cp_filename_tmp, cp_filename)
                    logger.debug("Wrote checkpoint to %s", cp_filename)
                CMA_es.map_function = temp_mf
                time_last_save = time.time()
            gen += 1
        return pop, self.hof, logbook, history
    def get_stats(self):
        """Get the stats that will be saved during optimisation"""
        # Statistics are computed on the reduced (scalar) fitness
        stats = deap.tools.Statistics(key=lambda ind: ind.fitness.reduce)
        stats.register("avg", numpy.mean)
        stats.register("std", numpy.std)
        stats.register("min", numpy.min)
        stats.register("max", numpy.max)
        return stats
================================================
FILE: bluepyopt/deapext/stoppingCriteria.py
================================================
"""StoppingCriteria class"""
"""
Copyright (c) 2016-2022, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# pylint: disable=R0912, R0914
import logging
import math
from collections import deque

import numpy

import bluepyopt.stoppingCriteria
logger = logging.getLogger("__main__")
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Return True if a and b are equal within the given tolerances.

    Delegates to the standard library's math.isclose, which implements the
    same PEP 485 formula the previous hand-rolled version used:
    abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
    """
    return math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol)
class MaxNGen(bluepyopt.stoppingCriteria.StoppingCriteria):
    """Stopping criterion triggered once a fixed generation count is passed"""
    name = "Max ngen"

    def __init__(self, max_ngen):
        """Constructor

        Args:
            max_ngen (int): maximum number of generations
        """
        super(MaxNGen, self).__init__()
        self.max_ngen = max_ngen

    def check(self, kwargs):
        """Set criteria_met once kwargs["gen"] exceeds max_ngen"""
        current_gen = kwargs.get("gen")
        if current_gen > self.max_ngen:
            self.criteria_met = True
class Stagnation(bluepyopt.stoppingCriteria.StoppingCriteria):
    """Stagnation stopping criteria class

    Triggers when both the best and the median fitness histories stopped
    improving over a window whose length grows with the generation count
    and the problem size / offspring ratio.
    """
    name = "Stagnation"
    def __init__(self, lambda_, problem_size):
        """Constructor

        Args:
            lambda_ (int): offspring size
            problem_size (int): number of parameters
        """
        super(Stagnation, self).__init__()
        self.lambda_ = lambda_
        self.problem_size = problem_size
        # History length required before the criterion can fire;
        # recomputed on every check()
        self.stagnation_iter = None
        # Per-generation best fitness history
        self.best = []
        # Per-generation (upper) median fitness history
        self.median = []
    def check(self, kwargs):
        """Check if the population stopped improving"""
        ngen = kwargs.get("gen")
        population = kwargs.get("population")
        # Reduced (scalar) fitness of each individual, sorted ascending
        fitness = [ind.fitness.reduce for ind in population]
        fitness.sort()
        # condition to avoid duplicates when re-starting
        if len(self.best) < ngen:
            self.best.append(fitness[0])
            # int(round(len/2)) picks the upper median for even sizes
            self.median.append(fitness[int(round(len(fitness) / 2.0))])
        # Required history length grows with the generation count and with
        # the problem size / offspring ratio
        self.stagnation_iter = int(
            numpy.ceil(
                0.2 * ngen + 120 + 30.0 * self.problem_size / self.lambda_
            )
        )
        # Enough history has accumulated for both statistics
        cbest = len(self.best) > self.stagnation_iter
        cmed = len(self.median) > self.stagnation_iter
        # Recent window (last 20 gens) is no better than the window of 20
        # values starting stagnation_iter generations back.
        # NOTE(review): assumes minimisation (lower fitness is better) and
        # that stagnation_iter > 20 so the old slice is non-empty — confirm
        cbest2 = numpy.median(self.best[-20:]) >= numpy.median(
            self.best[-self.stagnation_iter:-self.stagnation_iter + 20]
        )
        cmed2 = numpy.median(self.median[-20:]) >= numpy.median(
            self.median[-self.stagnation_iter:-self.stagnation_iter + 20]
        )
        if cbest and cmed and cbest2 and cmed2:
            self.criteria_met = True
class Stagnationv2(bluepyopt.stoppingCriteria.StoppingCriteria):
    """Stagnation stopping criteria class"""
    name = "Stagnationv2"
    def __init__(
        self, lambda_, problem_size, threshold=0.01, std_threshold=0.02
    ):
        """Constructor

        Args:
            lambda_ (int): offspring size
            problem_size (int): problem size
            threshold (float): 1st criterion is triggered if best fitness
                improves less than this threshold for 100 generations
            std_threshold (float): 2nd criterion is triggered if
                standard deviation of the best fitness over
                the last 20 generations is below the best fitness multiplied
                by this threshold
        """
        super(Stagnationv2, self).__init__()
        self.lambda_ = lambda_
        self.problem_size = problem_size
        # Minimum history length before the criterion can fire;
        # recomputed on every check()
        self.stagnation_iter = None
        self.threshold = threshold
        self.std_threshold = std_threshold
        # Per-generation best (lowest) reduced fitness history
        self.best = []
    def check(self, kwargs):
        """Check if best model fitness does not improve over 1% over 100 gens
        and is not noisy in the last 20 generations
        """
        ngen = kwargs.get("gen")
        population = kwargs.get("population")
        # Reduced (scalar) fitness per individual, sorted ascending
        fitness = [ind.fitness.reduce for ind in population]
        fitness.sort()
        # condition to avoid duplicates when re-starting
        if len(self.best) < ngen:
            self.best.append(fitness[0])
        self.stagnation_iter = int(
            numpy.ceil(
                0.2 * ngen + 120 + 30.0 * self.problem_size / self.lambda_
            )
        )
        # crit1: enough history has accumulated
        crit1 = len(self.best) > self.stagnation_iter
        # crit2: median of the last 20 bests is within `threshold` (relative)
        # of the bests recorded ~100-120 generations ago
        crit2 = numpy.median(self.best[-20:]) * (1 + self.threshold) \
            > numpy.median(self.best[-120:-100])
        # crit3: bests are stable (low std) over the last 20 generations.
        # NOTE(review): scales by self.best[-1], the latest best value —
        # presumably assumes non-negative fitness; confirm
        crit3 = numpy.std(self.best[-20:]) < (
            self.std_threshold * self.best[-1]
        )
        if crit1 and crit2 and crit3:
            self.criteria_met = True
class TolHistFun(bluepyopt.stoppingCriteria.StoppingCriteria):
    """Stop when recent best fitness values span a negligible range"""
    name = "TolHistFun"

    def __init__(self, lambda_, problem_size):
        """Constructor

        Args:
            lambda_ (int): offspring size
            problem_size (int): number of parameters
        """
        super(TolHistFun, self).__init__()
        # Tolerance on the spread of the tracked minima
        self.tolhistfun = 10 ** -12
        # Rolling window of per-generation best fitness values
        window = 10 + int(numpy.ceil(30.0 * problem_size / lambda_))
        self.mins = deque(maxlen=window)

    def check(self, kwargs):
        """Check if the range of the best values is smaller than
        the threshold"""
        population = kwargs.get("population")
        best = numpy.min([ind.fitness.reduce for ind in population])
        self.mins.append(best)
        window_full = len(self.mins) == self.mins.maxlen
        if window_full and max(self.mins) - min(self.mins) < self.tolhistfun:
            self.criteria_met = True
class EqualFunVals(bluepyopt.stoppingCriteria.StoppingCriteria):
    """EqualFunVals stopping criteria class"""
    name = "EqualFunVals"
    def __init__(self, lambda_, problem_size):
        """Constructor

        Args:
            lambda_ (int): offspring size
            problem_size (int): number of parameters
        """
        super(EqualFunVals, self).__init__()
        self.problem_size = problem_size
        # Number of "equal" generations tolerated: a third of problem_size
        self.equalvals = float(problem_size) / 3.0
        # Rank offset of the solution compared against the best
        self.equalvals_k = int(numpy.ceil(0.1 + lambda_ / 4.0))
        # Per-generation flags: 1 if best and compared solution were equal
        self.equalvalues = []
    def check(self, kwargs):
        """Check if in 1/3rd of the last problem_size iterations the best and
        k'th best solutions are equal"""
        ngen = kwargs.get("gen")
        population = kwargs.get("population")
        # Reduced (scalar) fitness per individual, sorted ascending
        fitness = [ind.fitness.reduce for ind in population]
        fitness.sort()
        # NOTE(review): with an ascending sort, fitness[-equalvals_k] is the
        # k'th value from the WORST end, not the k'th best as the docstring
        # says — confirm the intended indexing
        if isclose(fitness[0], fitness[-self.equalvals_k], rel_tol=1e-6):
            self.equalvalues.append(1)
        else:
            self.equalvalues.append(0)
        # Fire once more than a third of the last problem_size generations
        # were flagged as equal
        if (
            ngen > self.problem_size
            and sum(self.equalvalues[-self.problem_size:]) > self.equalvals
        ):
            self.criteria_met = True
class TolX(bluepyopt.stoppingCriteria.StoppingCriteria):
    """TolX stopping criteria class"""
    name = "TolX"

    def __init__(self):
        """Constructor"""
        super(TolX, self).__init__()
        # Threshold under which the search distribution is considered frozen
        self.tolx = 10 ** -12

    def check(self, kwargs):
        """Check if all components of pc and sqrt(diag(C)) are smaller than
        a threshold"""
        evolution_path = kwargs.get("pc")
        covariance = kwargs.get("C")
        path_small = all(evolution_path < self.tolx)
        stds_small = all(numpy.sqrt(numpy.diag(covariance)) < self.tolx)
        if path_small and stds_small:
            self.criteria_met = True
class TolUpSigma(bluepyopt.stoppingCriteria.StoppingCriteria):
    """TolUpSigma stopping criteria class"""
    name = "TolUpSigma"

    def __init__(self, sigma0):
        """Constructor

        Args:
            sigma0 (float): initial step size of the strategy
        """
        super(TolUpSigma, self).__init__()
        self.sigma0 = sigma0
        # Growth-ratio threshold signalling a diverging step size
        self.tolupsigma = 10 ** 20

    def check(self, kwargs):
        """Check if the sigma/sigma0 ratio is bigger than a threshold"""
        sigma = kwargs.get("sigma")
        diagD = kwargs.get("diagD")
        ratio = sigma / self.sigma0
        if ratio > float(diagD[-1] ** 2) * self.tolupsigma:
            self.criteria_met = True
class ConditionCov(bluepyopt.stoppingCriteria.StoppingCriteria):
    """ConditionCov stopping criteria class"""
    name = "ConditionCov"

    def __init__(self):
        """Constructor"""
        super(ConditionCov, self).__init__()
        # Maximum tolerated condition number of the covariance matrix
        self.conditioncov = 10 ** 14

    def check(self, kwargs):
        """Trigger when the covariance matrix becomes ill-conditioned"""
        condition_number = kwargs.get("cond")
        if condition_number > self.conditioncov:
            self.criteria_met = True
class NoEffectAxis(bluepyopt.stoppingCriteria.StoppingCriteria):
    """NoEffectAxis stopping criteria class"""
    name = "NoEffectAxis"
    def __init__(self, problem_size):
        """Constructor

        Args:
            problem_size (int): number of parameters
        """
        super(NoEffectAxis, self).__init__()
        # NOTE(review): unused in this criterion; looks copied from
        # ConditionCov — confirm whether it can be removed
        self.conditioncov = 10 ** 14
        self.problem_size = problem_size
    def check(self, kwargs):
        """Check if the coordinate axis std is too low"""
        ngen = kwargs.get("gen")
        centroid = kwargs.get("centroid")
        sigma = kwargs.get("sigma")
        diagD = kwargs.get("diagD")
        B = kwargs.get("B")
        # Cycle through one principal axis per generation
        noeffectaxis_index = ngen % self.problem_size
        # Fires when perturbing the centroid along the selected axis by a
        # tenth of its std changes nothing numerically.
        # NOTE(review): when the index is 0, -0 selects element 0 (the first
        # axis) rather than the last — confirm this is intended
        if all(
            centroid
            == centroid
            + 0.1 * sigma * diagD[-noeffectaxis_index] * B[-noeffectaxis_index]
        ):
            self.criteria_met = True
class NoEffectCoor(bluepyopt.stoppingCriteria.StoppingCriteria):
    """NoEffectCoor stopping criteria class"""
    name = "NoEffectCoor"

    def __init__(self):
        """Constructor"""
        super(NoEffectCoor, self).__init__()

    def check(self, kwargs):
        """Check if main axis std has no effect"""
        centroid = kwargs.get("centroid")
        sigma = kwargs.get("sigma")
        C = kwargs.get("C")
        # Fires when a perturbation along any coordinate changes nothing
        # numerically
        perturbation = 0.2 * sigma * numpy.diag(C)
        if any(centroid == centroid + perturbation):
            self.criteria_met = True
================================================
FILE: bluepyopt/deapext/tools/__init__.py
================================================
"""Init"""
from .selIBEA import * # NOQA
================================================
FILE: bluepyopt/deapext/tools/selIBEA.py
================================================
"""IBEA selector"""
"""
Copyright (c) 2016-2022, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
The code in this file was original written in 2015 at the
BlueBrain Project, EPFL, Lausanne
The authors were Werner Van Geit, Michael Gevaert and Jean-Denis Courcol
It is based on a C implementation of the IBEA algorithm in the PISA
optimization framework developed at the ETH, Zurich
http://www.tik.ee.ethz.ch/pisa/selectors/ibea/?page=ibea.php
"""
import numpy
import random
def selIBEA(population, mu, alpha=None, kappa=.05, tournament_n=4):
    """IBEA Selector

    Args:
        population (list): individuals to select from
        mu (int): number of parents to pick
        alpha (int): size the population is truncated to before mating;
            defaults to the full population size
        kappa (float): scaling factor of the IBEA indicator
        tournament_n (int): number of rounds per mating tournament

    Returns:
        list: the mu selected parents
    """
    if alpha is None:
        alpha = len(population)
    # Pairwise indicator components between all individuals
    components = _calc_fitness_components(population, kappa=kappa)
    # Aggregate the components into one IBEA fitness per individual
    _calc_fitnesses(population, components)
    # Truncate the population (in place) down to alpha individuals
    population[:] = _environmental_selection(population, alpha)
    # Pick the parents through tournaments
    return _mating_selection(population, mu, tournament_n)
def _calc_fitness_components(population, kappa):
"""returns an N * N numpy array of doubles, which is their IBEA fitness """
# DEAP selector are supposed to maximise the objective values
# We take the negative objectives because this algorithm will minimise
population_matrix = numpy.fromiter(
iter(-x for individual in population
for x in individual.fitness.wvalues),
dtype=numpy.float64)
pop_len = len(population)
feat_len = len(population[0].fitness.wvalues)
population_matrix = population_matrix.reshape((pop_len, feat_len))
# Calculate minimal square bounding box of the objectives
box_ranges = (numpy.max(population_matrix, axis=0) -
numpy.min(population_matrix, axis=0))
# Replace all possible zeros to avoid division by zero
# Basically 0/0 is replaced by 0/1
box_ranges[box_ranges == 0] = 1.0
components_matrix = numpy.zeros((pop_len, pop_len))
for i in range(0, pop_len):
diff = population_matrix - population_matrix[i, :]
components_matrix[i, :] = numpy.max(
numpy.divide(diff, box_ranges),
axis=1)
# Calculate max of absolute value of all elements in matrix
max_absolute_indicator = numpy.max(numpy.abs(components_matrix))
# Normalisation
if max_absolute_indicator != 0:
components_matrix = numpy.exp(
(-1.0 / (kappa * max_absolute_indicator)) * components_matrix.T)
return components_matrix
def _calc_fitnesses(population, components):
"""Calculate the IBEA fitness of every individual"""
# Calculate sum of every column in the matrix, ignore diagonal elements
column_sums = numpy.sum(components, axis=0) - numpy.diagonal(components)
# Fill the 'ibea_fitness' field on the individuals with the fitness value
for individual, ibea_fitness in zip(population, column_sums):
individual.ibea_fitness = ibea_fitness
def _choice(seq):
"""Python 2 implementation of choice"""
return seq[int(random.random() * len(seq))]
def _mating_selection(population, mu, tournament_n):
    """Return mu individuals chosen by repeated tournaments.

    Args:
        population: pool to draw contestants from
        mu: number of parents to return
        tournament_n: contestants per tournament

    Returns:
        List of mu tournament winners (duplicates possible)
    """
    parents = []
    while len(parents) < mu:
        # Draw 'tournament_n' random contestants and keep the one with
        # the smallest (best) IBEA fitness
        winner = _choice(population)
        for _ in range(tournament_n - 1):
            challenger = _choice(population)
            if challenger.ibea_fitness < winner.ibea_fitness:
                winner = challenger
        parents.append(winner)
    return parents
def _environmental_selection(population, selection_size):
"""Returns the selection_size individuals with the best fitness"""
# Sort the individuals based on their fitness
population.sort(key=lambda ind: ind.ibea_fitness)
# Return the first 'selection_size' elements
return population[:selection_size]
# Restrict star-imports (e.g. 'from .selIBEA import *' in tools/__init__.py)
# to the public selector only
__all__ = ['selIBEA']
================================================
FILE: bluepyopt/deapext/utils.py
================================================
"""Utils function"""
"""
Copyright (c) 2016-2022, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import numpy
import random
import deap.base
# pylint: disable=R0914, R0912
class WeightedReducedFitness(deap.base.Fitness):
    """DEAP fitness whose ordering uses a reduction of the weighted values.

    All objectives get weight -1.0 (minimisation); comparison operators
    compare the reduction (default: sum) of the weighted values.
    """

    def __init__(self, values=(), obj_size=None, reduce_fcn=numpy.sum):
        """Constructor

        Args:
            values: initial objective values
            obj_size: number of objectives (a single one if not given)
            reduce_fcn: reduction applied to the (weighted) values
        """
        if obj_size is None:
            self.weights = [-1]
        else:
            self.weights = [-1.0] * obj_size
        self.reduce_fcn = reduce_fcn
        super(WeightedReducedFitness, self).__init__(values)

    @property
    def reduce(self):
        """Reduction of the raw objective values"""
        return self.reduce_fcn(self.values)

    @property
    def weighted_reduce(self):
        """Reduction of the weighted objective values"""
        return self.reduce_fcn(self.wvalues)

    def __le__(self, other):
        return self.weighted_reduce <= other.weighted_reduce

    def __lt__(self, other):
        return self.weighted_reduce < other.weighted_reduce

    def __deepcopy__(self, _):
        """Override deepcopy with a cheap shallow copy of the __dict__"""
        duplicate = self.__class__.__new__(self.__class__)
        duplicate.__dict__.update(self.__dict__)
        return duplicate
class WSListIndividual(list):
    """Individual consisting of a list with weighted fitness"""

    def __init__(self, *args, **kwargs):
        """Constructor

        Args:
            obj_size (int): number of objectives (required keyword argument)
            reduce_fcn: reduction used by the fitness (defaults to numpy.sum)

        Any remaining positional/keyword arguments are forwarded to list().

        Raises:
            KeyError: if 'obj_size' is not passed (same as the previous
                'del kwargs["obj_size"]' behaviour)
        """
        # pop() both reads and removes the fitness-specific kwargs in one
        # step, so list.__init__ below only receives its own arguments
        reduce_fcn = kwargs.pop("reduce_fcn", numpy.sum)
        self.fitness = WeightedReducedFitness(
            obj_size=kwargs.pop("obj_size"), reduce_fcn=reduce_fcn
        )
        # Index of the parent, used by MO-CMA
        self._ps = "p", 0
        super(WSListIndividual, self).__init__(*args, **kwargs)
def update_history_and_hof(halloffame, history, population):
    """Register the generated individuals in hall of fame and history.

    Note: History and Hall-of-Fame behave like dictionaries

    Args:
        halloffame: optional hall of fame (may be None)
        history: genealogy history, always updated
        population: newly generated individuals
    """
    # The hall of fame is optional; the history is always kept up to date
    if halloffame is not None:
        halloffame.update(population)
    history.update(population)
def record_stats(stats, logbook, gen, population, invalid_count):
    """Update the statistics with the new population.

    Args:
        stats: optional DEAP statistics object (may be None)
        logbook: logbook that receives the record
        gen: generation number
        population: population the statistics are compiled over
        invalid_count: number of evaluated (invalid-fitness) individuals
    """
    if stats is None:
        summary = {}
    else:
        summary = stats.compile(population)
    logbook.record(gen=gen, nevals=invalid_count, **summary)
def closest_feasible(individual, lbounds, ubounds):
    """Return the closest individual within the parameter bounds.

    Mutates the individual in place: every element at or beyond a bound
    is pushed strictly inside it by a small epsilon.
    """
    # TODO: Fix 1e-9 hack
    for index, (upper, lower, value) in enumerate(
            zip(ubounds, lbounds, individual)):
        if value >= upper:
            individual[index] = upper - 1e-9
        elif value <= lower:
            individual[index] = lower + 1e-9
    return individual
def bound(population, lbounds, ubounds):
    """Bounds the population based on lower and upper parameter bounds.

    Replaces each out-of-bounds individual by its closest feasible
    counterpart and returns how many individuals were clamped.
    """
    n_out = 0
    for index, individual in enumerate(population):
        below = numpy.any(numpy.less(individual, lbounds))
        above = numpy.any(numpy.greater(individual, ubounds))
        if below or above:
            population[index] = closest_feasible(
                individual, lbounds, ubounds)
            n_out += 1
    return n_out
def uniform(lower_list, upper_list, dimensions):
    """Uniformly pick an individual.

    When the bounds are iterables, one value is drawn per (lower, upper)
    pair; otherwise the scalar bounds apply to all 'dimensions' values.
    """
    if not hasattr(lower_list, "__iter__"):
        return [random.uniform(lower_list, upper_list)
                for _ in range(dimensions)]
    return [random.uniform(lower, upper)
            for lower, upper in zip(lower_list, upper_list)]
def reduce_method(meth):
    """Pickle helper: rebuild a bound method via getattr on its instance."""
    bound_to = meth.__self__
    method_name = meth.__func__.__name__
    return (getattr, (bound_to, method_name))
def run_next_gen(criteria, terminator):
    """Condition to stay inside the generation loop.

    Args:
        criteria: boolean stopping criteria computed by the caller
        terminator: optional event-like object with is_set() (may be None)
    """
    if terminator is not None:
        # An externally set terminator overrides the criteria
        return criteria and not terminator.is_set()
    return criteria
================================================
FILE: bluepyopt/ephys/__init__.py
================================================
"""Init script"""
"""
Copyright (c) 2016-2020, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# pylint: disable=W0511
from . import base # NOQA
from . import simulators # NOQA
from . import models # NOQA
from . import evaluators # NOQA
from . import mechanisms # NOQA
from . import locations # NOQA
from . import parameterscalers # NOQA
from . import parameters # NOQA
from . import morphologies # NOQA
from . import efeatures # NOQA
from . import objectives # NOQA
from . import protocols # NOQA
from . import responses # NOQA
from . import recordings # NOQA
from . import objectivescalculators # NOQA
from . import stimuli # NOQA
# TODO create all the necessary abstract methods
# TODO check inheritance structure
# TODO instantiate using 'simulation env' as parameter, instead of cell
================================================
FILE: bluepyopt/ephys/acc.py
================================================
'''Dependencies of Arbor simulator backend'''
try:
    import arbor
except ImportError:
    # Fallback stub so this module can be imported without arbor installed.
    #
    # The rest of this package accesses attributes on the *class* itself
    # (e.g. 'arbor.write_component', 'arbor.segment_tree'). Class-attribute
    # lookup does not go through an instance-level __getattribute__, so the
    # interception must live on the metaclass — otherwise users would see a
    # bare AttributeError instead of this installation hint.
    class _ArborStubMeta(type):
        """Metaclass raising a helpful ImportError on class-attribute access"""

        def __getattr__(cls, _):
            raise ImportError("Exporting cell models to ACC/JSON, loading"
                              " them or optimizing them with the Arbor"
                              " simulator requires missing dependency arbor."
                              " To install BluePyOpt with arbor,"
                              " run 'pip install bluepyopt[arbor]'.")

    class arbor(metaclass=_ArborStubMeta):
        """Stub replacing the arbor module when it is not installed"""

        def __getattribute__(self, _):
            # Same hint for (unlikely) attribute access on an instance
            raise ImportError("Exporting cell models to ACC/JSON, loading"
                              " them or optimizing them with the Arbor"
                              " simulator requires missing dependency arbor."
                              " To install BluePyOpt with arbor,"
                              " run 'pip install bluepyopt[arbor]'.")
class ArbLabel:
    """Arbor label: a named locset/region/iexpr backed by an s-expression"""

    def __init__(self, type, name, s_expr):
        """Constructor

        Args:
            type (str): label kind, one of 'locset', 'region' or 'iexpr'
            name (str): name under which the label is registered
            s_expr (str): s-expression defining the label's location

        Raises:
            ValueError: if type is not a supported label kind
        """
        if type not in ['locset', 'region', 'iexpr']:
            raise ValueError('Invalid Arbor label type %s' % type)
        self._type = type
        self._name = name
        self._s_expr = s_expr

    @property
    def defn(self):
        """Label definition for label-dict"""
        return '(%s-def "%s" %s)' % (self._type, self._name, self._s_expr)

    @property
    def ref(self):
        """Reference to label defined in label-dict"""
        return '(%s "%s")' % (self._type, self._name)

    @property
    def name(self):
        """Name of the label"""
        return self._name

    @property
    def loc(self):
        """S-expression defining the location of the label"""
        return self._s_expr

    def __eq__(self, other):
        # Equality (and the hash below) is based on the s-expression only,
        # not on the name or kind
        if other is None:
            return False
        if not isinstance(other, ArbLabel):
            raise TypeError('%s is not an ArbLabel' % str(other))
        return self._s_expr == other._s_expr

    def __hash__(self):
        return hash(self._s_expr)

    def __repr__(self):
        return self.defn
================================================
FILE: bluepyopt/ephys/base.py
================================================
'''Base class for ephys classes'''
class BaseEPhys(object):
    '''Base class for ephys classes'''

    def __init__(self, name='', comment=''):
        '''Constructor

        Args:
            name (str): name of this object
            comment (str): short free-form description
        '''
        self.name = name
        self.comment = comment

    def __str__(self):
        return '{}: {} ({})'.format(
            self.__class__.__name__, self.name, self.comment)
================================================
FILE: bluepyopt/ephys/create_acc.py
================================================
"""create JSON/ACC files for Arbor from a set of BluePyOpt.ephys parameters"""
# pylint: disable=R0914
import io
import logging
import pathlib
from collections import ChainMap, namedtuple, OrderedDict
import re
import jinja2
import json
import shutil
from bluepyopt.ephys.acc import arbor
from bluepyopt.ephys.morphologies import ArbFileMorphology
from bluepyopt.ephys.create_hoc import (
Location,
RangeExpr,
PointExpr,
_get_template_params,
format_float,
)
# Module-level logger for the JSON/ACC exporter
logger = logging.getLogger(__name__)

# Inhomogeneous expression for scaled parameter in Arbor:
# 'value' holds the constant part, 'scale' the expression produced by the
# parameter scaler's acc_scale_iexpr (see _arb_append_scaled_mechs below)
RangeIExpr = namedtuple("RangeIExpr", "name, value, scale")
class ArbVar:
    """Definition of a Neuron to Arbor parameter conversion"""

    def __init__(self, name, conv=None):
        """Constructor

        Args:
            name (str): Arbor parameter name
            conv (): Conversion of parameter value from Neuron units
                to Arbor (defaults to identity, i.e. no conversion)
        """
        self.name = name
        self.conv = conv

    def __repr__(self):
        return "ArbVar({}, {})".format(self.name, self.conv)
class Nrn2ArbParamAdapter:
    """Converts a Neuron parameter to Arbor format (name and value)"""

    # Mapping of Neuron parameter name -> ArbVar (Arbor name plus optional
    # unit conversion). Parameters not listed here keep their Neuron name
    # and value unchanged.
    _mapping = dict(
        v_init=ArbVar(name="membrane-potential"),
        celsius=ArbVar(
            name="temperature-kelvin", conv=lambda celsius: celsius + 273.15
        ),
        Ra=ArbVar(name="axial-resistivity"),
        cm=ArbVar(
            name="membrane-capacitance", conv=lambda cm: cm / 100.0
        ),  # NEURON: uF/cm^2, Arbor: F/m^2
        # Ion concentrations: keys are species + first letter of the
        # location, e.g. 'nai' -> 'ion-internal-concentration "na"'.
        # NOTE(review): the external keys come out as 'nae'/'ke'/'cae'
        # ('ex'[0] == 'e'), while NEURON's usual external-concentration
        # names are 'nao'/'ko'/'cao' — confirm this key scheme is intended.
        **{
            species + loc[0]: ArbVar(
                name='ion-%sternal-concentration "%s"' % (loc, species)
            )
            for species in ["na", "k", "ca"]
            for loc in ["in", "ex"]
        },
        # Reversal potentials: 'ena' -> 'ion-reversal-potential "na"', etc.
        **{
            "e" + species: ArbVar(name='ion-reversal-potential "%s"' % species)
            for species in ["na", "k", "ca"]
        },
    )

    @classmethod
    def _param_name(cls, name):
        """Neuron to Arbor parameter renaming

        Args:
            name (str): Neuron parameter name

        Returns:
            The Arbor name from _mapping, or the unchanged name if unmapped
        """
        return cls._mapping[name].name if name in cls._mapping else name

    @classmethod
    def _param_value(cls, param):
        """Neuron to Arbor units conversion for parameter values

        Args:
            param (): A Neuron parameter with a value in Neuron units

        Returns:
            The value as a formatted string, unit-converted if the mapping
            defines a conversion; string values are passed through as-is
        """
        if (
            param.name in cls._mapping
            and cls._mapping[param.name].conv is not None
        ):
            return format_float(
                cls._mapping[param.name].conv(float(param.value))
            )
        else:
            return (
                param.value
                if isinstance(param.value, str)
                else format_float(param.value)
            )

    @classmethod
    def _conv_param(cls, param, name):
        """Convert a Neuron parameter to Arbor format (name and units)

        Args:
            param (): A Neuron parameter (Location, RangeExpr or PointExpr)
            name (): Parameter name without mech prefix/suffix

        Raises:
            CreateAccException: for unsupported parameter expression types
        """
        if isinstance(param, Location):
            return Location(
                name=cls._param_name(name), value=cls._param_value(param)
            )
        elif isinstance(param, RangeExpr):
            return RangeExpr(
                location=param.location,
                name=cls._param_name(name),
                value=cls._param_value(param),
                value_scaler=param.value_scaler,
            )
        elif isinstance(param, PointExpr):
            return PointExpr(
                name=cls._param_name(name),
                point_loc=param.point_loc,
                value=cls._param_value(param),
            )
        else:
            raise CreateAccException(
                "Unsupported parameter expression type %s." % type(param)
            )

    @classmethod
    def format(cls, param, mechs):
        """Find a parameter's mechanism and convert name to Arbor format

        Args:
            param (): A parameter in Neuron format
            mechs (): List of co-located NMODL mechanisms

        Returns:
            A tuple of mechanism name (None for a non-mechanism parameter) and
            parameter in Arbor format

        Raises:
            CreateAccException: if the parameter name matches more than one
                mechanism
        """
        if not isinstance(param, PointExpr):
            # Density parameters carry the mech as a '_<mech>' name suffix
            mech_matches = [
                i
                for i, mech in enumerate(mechs)
                if param.name.endswith("_" + mech)
            ]
        else:
            # Point-process parameters reference their mech via the location
            param_pprocesses = [loc.pprocess_mech for loc in param.point_loc]
            mech_matches = [
                i for i, mech in enumerate(mechs) if mech in param_pprocesses
            ]
        if len(mech_matches) == 0:
            return None, cls._conv_param(param, name=param.name)
        elif len(mech_matches) == 1:
            mech = mechs[mech_matches[0]]
            if not isinstance(param, PointExpr):
                # Strip the '_<mech>' suffix before converting the name
                name = param.name[: -(len(mech) + 1)]
            else:
                name = param.name
            return mech, cls._conv_param(param, name=name)
        else:
            raise CreateAccException(
                "Parameter name %s matches" % param.name
                + " multiple mechanisms %s"
                % [repr(mechs[i]) for i in mech_matches]
            )
class Nrn2ArbMechGrouper:
    """Group parameters by mechanism and convert them to Arbor format"""

    @staticmethod
    def _is_global_property(loc, param):
        """Returns if a label-specific variable is a global property in Arbor

        Args:
            loc (): An Arbor label describing the location
            param (): A parameter in Arbor format (name and units)
        """
        # Only parameters painted on the whole cell ('all' region) qualify,
        # and only if they are a cable property or an ion-specific default
        return loc == ArbFileMorphology.region_labels["all"] and (
            param.name
            in [
                "membrane-potential",
                "temperature-kelvin",
                "axial-resistivity",
                "membrane-capacitance",
            ]
            or param.name.split(" ")[0]
            in [
                "ion-internal-concentration",
                "ion-external-concentration",
                "ion-reversal-potential",
            ]
        )

    @classmethod
    def _separate_global_properties(cls, loc, mechs):
        """Separates global properties from a label-specific dict of mechanisms

        Args:
            loc (): An Arbor label describing the location
            mechs (): A mapping of mechanism name to list of parameters in
                Arbor format (None for non-mechanism parameters).

        Returns:
            A split of mechs into mechanisms without Arbor global properties
            (first component) and a dict with Arbor global properties
            (second component)
        """
        local_mechs = dict()
        global_properties = []
        for mech, params in mechs.items():
            if mech is None:
                # Non-mechanism parameters may contain global properties
                local_properties = []
                for param in params:
                    if cls._is_global_property(loc, param):
                        global_properties.append(param)
                    else:
                        local_properties.append(param)
                local_mechs[mech] = local_properties
            else:
                # Mechanism parameters always stay local
                local_mechs[mech] = params
        return local_mechs, {None: global_properties}

    @staticmethod
    def _format_params_and_group_by_mech(params, channels):
        """Group list of parameters by mechanism and turn them to Arbor format

        Args:
            params (): List of parameters in Neuron format
            channels (): List of co-located NMODL mechanisms

        Returns:
            Mapping of Arbor mechanism name to list of parameters in Arbor
            format
        """
        mech_params = [
            Nrn2ArbParamAdapter.format(param, channels) for param in params
        ]
        # Seed the dict with the mechs that own parameters, then make sure
        # every co-located channel appears (possibly with an empty list)
        mechs = {mech: [] for mech, _ in mech_params}
        for mech in channels:
            if mech not in mechs:
                mechs[mech] = []
        for mech, param in mech_params:
            mechs[mech].append(param)
        return mechs

    @classmethod
    def process_global(cls, params):
        """Group global BluePyOpt params by mech, convert them to Arbor format

        Args:
            params (): List of global parameters in Neuron format

        Returns:
            A mapping of mechanism to parameters representing Arbor global
            properties. The mechanism parameters are in Arbor format
            (mechanism name is None for non-mechanism parameters).
        """
        return cls._format_params_and_group_by_mech(
            [
                Location(name=name, value=value)
                for name, value in params.items()
            ],
            [],  # no default mechanisms
        )

    @classmethod
    def process_local(cls, params, channels):
        """Group local BluePyOpt params by mech, convert them to Arbor format

        Args:
            params (): List of Arbor label/local parameters pairs in Neuron
                format
            channels (): Mapping of Arbor label to co-located NMODL mechanisms

        Returns:
            The return value is a tuple. In the first component, a two-level
            mapping of Arbor label to mechanism to parameters. The mechanism
            parameters are in Arbor format (mechanism name is None for
            non-mechanism parameters). In the second component, the
            Arbor global properties found are returned.
        """
        local_mechs = dict()
        global_properties = dict()
        for loc, loc_params in params:
            mechs = cls._format_params_and_group_by_mech(
                loc_params, channels[loc]
            )
            # move Arbor global properties to global_params
            mechs, global_props = cls._separate_global_properties(loc, mechs)
            if global_props.keys() != {None}:
                raise CreateAccException(
                    "Support for Arbor default mechanisms not implemented."
                )
            # iterate over global_props items if above exception triggers
            global_properties[None] = (
                global_properties.get(None, []) + global_props[None]
            )
            local_mechs[loc] = mechs
        return local_mechs, global_properties
def _arb_filter_point_proc_locs(pprocess_mechs):
    """Filter locations from point process parameters.

    Args:
        pprocess_mechs (): Point process mechanisms with parameters in
            Arbor format

    Returns:
        Two-level dict: label -> point-process name -> {'mech': suffix,
        'params': [Location, ...]}
    """
    result = dict()
    for loc, mechs in pprocess_mechs.items():
        result[loc] = dict()
        for mech, point_exprs in mechs.items():
            # Keep only name/value pairs; the explicit locations are
            # dropped from the parameter expressions here
            result[loc][mech.name] = dict(
                mech=mech.suffix,
                params=[
                    Location(point_expr.name, point_expr.value)
                    for point_expr in point_exprs
                ],
            )
    return result
def _arb_append_scaled_mechs(mechs, scaled_mechs):
    """Append scaled mechanism parameters to constant ones.

    Mutates 'mechs' in place, extending each mechanism's parameter list
    with RangeIExpr entries derived from the scaled parameters.

    Raises:
        CreateAccException: if scaled parameters occur outside a mechanism
    """
    for mech, scaled_params in scaled_mechs.items():
        # Arbor only supports inhomogeneous expressions on mech parameters
        if mech is None and len(scaled_params) > 0:
            raise CreateAccException(
                "Non-mechanism parameters cannot have inhomogeneous"
                " expressions in Arbor %s" % scaled_params
            )
        iexprs = [
            RangeIExpr(
                name=param.name,
                value=param.value,
                scale=param.value_scaler.acc_scale_iexpr(param.value),
            )
            for param in scaled_params
        ]
        mechs[mech] = mechs.get(mech, []) + iexprs
# A mechanism's NMODL GLOBAL and RANGE variables in Arbor
# ('globals'/'ranges' are lists of variable names, or None when the
# corresponding NMODL statement is absent)
MechMetaData = namedtuple("MechMetaData", "globals, ranges")
class ArbNmodlMechFormatter:
    """Loads catalogue metadata and reformats mechanism name for ACC"""

    def __init__(self, ext_catalogues):
        """Load metadata of external and Arbor's built-in mechanism catalogues

        Args:
            ext_catalogues (): Mapping of catalogue name to directory
                with NMODL files defining the mechanisms.
        """
        self.cats = self._load_mech_catalogue_meta(ext_catalogues)

    @staticmethod
    def _load_catalogue_meta(cat_dir):
        """Load mechanism catalogue metadata from NMODL files

        Args:
            cat_dir (): Path to directory with NMODL files of catalogue

        Returns:
            Mapping of name to meta data for each mechanism in the directory
        """
        # used to generate arbor_mechanisms.json on NMODL from arbor/mechanisms
        nmodl_pattern = r"^\s*%s\s+((?:\w+\,\s*)*?\w+)\s*?$"  # NOQA
        suffix_pattern = nmodl_pattern % "SUFFIX"
        globals_pattern = nmodl_pattern % "GLOBAL"
        ranges_pattern = nmodl_pattern % "RANGE"

        def process_nmodl(nmodl_str):
            """Extract global and range params from Arbor-conforming NMODL"""
            try:
                # Inspect only the NEURON block of the NMODL file
                nrn = re.search(
                    r"NEURON\s+{([^}]+)}", nmodl_str, flags=re.MULTILINE
                ).group(1)
                suffix_ = re.search(suffix_pattern, nrn, flags=re.MULTILINE)
                suffix_ = suffix_ if suffix_ is None else suffix_.group(1)
                globals_ = re.search(globals_pattern, nrn, flags=re.MULTILINE)
                globals_ = (
                    globals_
                    if globals_ is None
                    else re.findall(r"\w+", globals_.group(1))
                )
                ranges_ = re.search(ranges_pattern, nrn, flags=re.MULTILINE)
                ranges_ = (
                    ranges_
                    if ranges_ is None
                    else re.findall(r"\w+", ranges_.group(1))
                )
            except Exception as e:
                # nmodl_file resolves to the loop variable below at call time
                raise CreateAccException(
                    "NMODL-inspection for %s failed." % nmodl_file
                ) from e
            # skipping suffix_
            return MechMetaData(globals=globals_, ranges=ranges_)

        mechs = dict()
        cat_dir = pathlib.Path(cat_dir)
        for nmodl_file in cat_dir.glob("*.mod"):
            with open(cat_dir.joinpath(nmodl_file)) as f:
                mechs[nmodl_file.stem] = process_nmodl(f.read())
        return mechs

    @classmethod
    def _load_mech_catalogue_meta(cls, ext_catalogues):
        """Load metadata of external and Arbor's built-in mechanism catalogues

        Args:
            ext_catalogues (): Mapping of catalogue name to directory
                with NMODL files defining the mechanisms

        Returns:
            Ordered mapping of catalogue name -> mechanism name -> meta data
            for external and built-in catalogues (external ones taking
            precedence)
        """
        arb_cats = OrderedDict()
        # External catalogues are inserted first so they take precedence
        if ext_catalogues is not None:
            for cat, cat_nmodl in ext_catalogues.items():
                arb_cats[cat] = cls._load_catalogue_meta(
                    pathlib.Path(cat_nmodl).resolve()
                )
        # Built-in catalogue metadata is shipped as a static JSON file
        builtin_catalogues = (
            pathlib.Path(__file__)
            .parent.joinpath("static/arbor_mechanisms.json")
            .resolve()
        )
        with open(builtin_catalogues) as f:
            builtin_arb_cats = json.load(f)
        for cat in ["BBP", "default", "allen"]:
            if cat not in arb_cats:
                arb_cats[cat] = {
                    mech: MechMetaData(**meta)
                    for mech, meta in builtin_arb_cats[cat].items()
                }
        return arb_cats

    @staticmethod
    def _mech_name(name):
        """Neuron to Arbor mechanism name conversion

        Args:
            name (): A Neuron mechanism name
        """
        # Only the built-in synapse mechanisms are renamed (lower-cased)
        if name in ["Exp2Syn", "ExpSyn"]:
            return name.lower()
        else:
            return name

    @classmethod
    def _translate_mech(cls, mech_name, mech_params, arb_cats):
        """Translate NMODL mechanism to Arbor ACC format

        Args:
            mech_name (): NMODL mechanism name (suffix)
            mech_params (): Mechanism parameters in Arbor format
            arb_cats (): Mapping of catalogue names to mechanisms
                with their meta data

        Returns:
            Tuple of mechanism name with NMODL GLOBAL parameters integrated and
            catalogue prefix added as well as the remaining RANGE parameters

        Raises:
            CreateAccException: if a parameter is neither a GLOBAL nor a
                RANGE variable of the matched mechanism
        """
        arb_mech = None
        arb_mech_name = cls._mech_name(mech_name)
        # Find the mechanism in the catalogues and qualify its name
        for cat in arb_cats:  # in order of precedence
            if arb_mech_name in arb_cats[cat]:
                arb_mech = arb_cats[cat][arb_mech_name]
                mech_name = cat + "::" + arb_mech_name
                break
        if arb_mech is None:  # not Arbor built-in mech, no qualifier added
            if mech_name is not None:
                # logger.warn is a deprecated alias of logger.warning
                logger.warning(
                    "create_acc: Could not find Arbor mech for %s (%s)."
                    % (mech_name, mech_params)
                )
            return (mech_name, mech_params)
        else:
            if arb_mech.globals is None:  # only Arbor range params
                for param in mech_params:
                    if param.name not in arb_mech.ranges:
                        raise CreateAccException(
                            "%s not a GLOBAL or RANGE parameter of %s"
                            % (param.name, mech_name)
                        )
                return (mech_name, mech_params)
            else:
                for param in mech_params:
                    if (
                        param.name not in arb_mech.globals
                        and param.name not in arb_mech.ranges
                    ):
                        raise CreateAccException(
                            "%s not a GLOBAL or RANGE parameter of %s"
                            % (param.name, mech_name)
                        )
                # GLOBAL parameters are folded into the mechanism name
                # ('mech/param=value'); RANGE parameters stay separate
                mech_name_suffix = []
                remaining_mech_params = []
                for mech_param in mech_params:
                    if mech_param.name in arb_mech.globals:
                        mech_name_suffix.append(
                            mech_param.name + "=" + mech_param.value
                        )
                        if isinstance(mech_param, RangeIExpr):
                            remaining_mech_params.append(
                                RangeIExpr(
                                    name=mech_param.name,
                                    value=None,
                                    scale=mech_param.scale,
                                )
                            )
                    else:
                        remaining_mech_params.append(mech_param)
                if len(mech_name_suffix) > 0:
                    mech_name += "/" + ",".join(mech_name_suffix)
                return (mech_name, remaining_mech_params)

    def translate_density(self, mechs):
        """Translate all density mechanisms in a specific region"""
        return dict(
            [
                self._translate_mech(mech, params, self.cats)
                for mech, params in mechs.items()
            ]
        )

    def translate_points(self, mechs):
        """Translate all point mechanisms for a specific locset"""
        result = dict()
        for synapse_name, mech_desc in mechs.items():
            mech, params = self._translate_mech(
                mech_desc["mech"], mech_desc["params"], self.cats
            )
            result[synapse_name] = dict(mech=mech, params=params)
        return result
def _arb_project_scaled_mechs(mechs):
    """Returns all (iexpr) parameters of scaled mechanisms in Arbor.

    Only mechanisms that actually carry RangeIExpr parameters appear
    in the result.
    """
    scaled_mechs = dict()
    for mech, params in mechs.items():
        iexpr_params = [
            param for param in params if isinstance(param, RangeIExpr)
        ]
        if iexpr_params:
            scaled_mechs[mech] = iexpr_params
    return scaled_mechs
def _arb_populate_label_dict(local_mechs, local_scaled_mechs, pprocess_mechs):
"""Creates a dict of labels from label-specific parameters/mechanisms
Args:
local_mechs (): label-specific parameters/density mechanisms
local_scaled_mechs (): label-specific iexpr parameters/density mechs
pprocess_mechs (): label-specific point processes
Returns:
A dict mapping label name to ArbLabel for each label in the input
"""
label_dict = dict()
acc_labels = ChainMap(local_mechs, local_scaled_mechs, pprocess_mechs)
for acc_label in acc_labels:
if (
acc_label.name in label_dict
and acc_label != label_dict[acc_label.name]
):
raise CreateAccException(
"Label %s already exists in"
% acc_label.name
+ " label_dict with different s-expression: "
" %s != %s." % (label_dict[acc_label.name].loc, acc_label.loc)
)
elif acc_label.name not in label_dict:
label_dict[acc_label.name] = acc_label
return label_dict
def _read_templates(template_dir, template_filename):
    """Expand Jinja2 template filepath with glob and
    return dict of target filename -> parsed template

    Raises:
        FileNotFoundError: if the glob matches no template files
    """
    if template_dir is None:
        template_dir = (
            pathlib.Path(__file__).parent.joinpath("templates").resolve()
        )
    templates = dict()
    for template_path in pathlib.Path(template_dir).glob(template_filename):
        with open(template_path) as template_file:
            template_str = template_file.read()
        # Derive the target filename: strip the '.jinja2' and '_template'
        # suffixes, then turn the last '_' into the '.' of a file extension
        name = template_path.name
        if name.endswith(".jinja2"):
            name = name[:-7]
        if name.endswith("_template"):
            name = name[:-9]
        if "_" in name:
            name = ".".join(name.rsplit("_", 1))
        templates[name] = jinja2.Template(template_str)
    if not templates:
        raise FileNotFoundError(
            f"No templates found for JSON/ACC-export in {template_dir}"
        )
    return templates
def _arb_loc_desc(location, param_or_mech):
"""Generate Arbor location description for label dict and decor"""
return location.acc_label()
def create_acc(
    mechs,
    parameters,
    morphology=None,
    morphology_dir=None,
    ext_catalogues=None,
    ignored_globals=(),
    replace_axon=None,
    create_mod_morph=False,
    template_name="CCell",
    template_filename="acc/*_template.jinja2",
    disable_banner=None,
    template_dir=None,
    custom_jinja_params=None,
):
    """return a dict with strings containing the rendered JSON/ACC templates

    Args:
        mechs (): All the mechs for the decor template
        parameters (): All the parameters in the decor/label-dict template
        morphology (str): Name of morphology (effectively required: a None
            value fails the suffix check below)
        morphology_dir (str): Directory of morphology
        ext_catalogues (): Name to path mapping of non-Arbor built-in
            NMODL mechanism catalogues compiled with modcc
        ignored_globals (iterable str): Skipped NrnGlobalParameter in decor
        replace_axon (): Axon replacement morphology
        create_mod_morph (): Create ACC morphology with axon replacement
        template_name (str): prefix for the generated file names, also
            passed to the templates as 'template_name'
        template_filename (str): file path of the cell.json , decor.acc and
            label_dict.acc jinja2 templates (with wildcards expanded by glob)
        disable_banner (): forwarded to _get_template_params
            (presumably suppresses the banner comment — confirm there)
        template_dir (str): dir name of the jinja2 templates
        custom_jinja_params (dict): dict of additional jinja2 params in case
            of a custom template

    Returns:
        Dict of output filename -> rendered file contents; with axon
        replacement, the replacement (and optionally the modified
        morphology) ACC strings are included under their own filenames

    Raises:
        CreateAccException: for unsupported morphology formats or globally
            placed point processes
        NotImplementedError: if the installed Arbor lacks
            segment_tree.tag_roots (needed for axon replacement)
    """
    if custom_jinja_params is None:
        custom_jinja_params = {}
    # Arbor only reads SWC and ASC morphologies
    if pathlib.Path(morphology).suffix.lower() not in [".swc", ".asc"]:
        raise CreateAccException(
            "Morphology file %s not supported in Arbor "
            " (only supported types are .swc and .asc)." % morphology
        )
    if replace_axon is not None:
        if not hasattr(arbor.segment_tree, "tag_roots"):
            raise NotImplementedError(
                "Need a newer version of Arbor" " for axon replacement."
            )
        logger.debug(
            "Obtain axon replacement by applying "
            "ArbFileMorphology.replace_axon after loading "
            "morphology in Arbor."
        )
        # Serialize the replacement morphology to an in-memory ACC buffer
        replace_axon_path = (
            pathlib.Path(morphology).stem + "_axon_replacement.acc"
        )
        replace_axon_acc = io.StringIO()
        arbor.write_component(replace_axon, replace_axon_acc)
        replace_axon_acc.seek(0)
        if create_mod_morph:
            # Also produce the full morphology with the axon replaced
            modified_morphology_path = (
                pathlib.Path(morphology).stem + "_modified.acc"
            )
            modified_morpho = ArbFileMorphology.load(
                pathlib.Path(morphology_dir).joinpath(morphology),
                replace_axon_acc,
            )
            # rewind: the buffer was consumed by ArbFileMorphology.load
            replace_axon_acc.seek(0)
            modified_morphology_acc = io.StringIO()
            arbor.write_component(modified_morpho, modified_morphology_acc)
            modified_morphology_acc.seek(0)
            modified_morphology_acc = modified_morphology_acc.read()
        else:
            modified_morphology_path = None
            modified_morphology_acc = None
        replace_axon_acc = replace_axon_acc.read()
    else:
        replace_axon_path = None
        modified_morphology_path = None
    templates = _read_templates(template_dir, template_filename)
    default_location_order = list(ArbFileMorphology.region_labels.values())
    template_params = _get_template_params(
        mechs,
        parameters,
        ignored_globals,
        disable_banner,
        default_location_order,
        _arb_loc_desc,
    )
    # Output filenames: '<template_name><ext>' for bare extensions,
    # '<template_name>_<name>' otherwise
    filenames = {
        name: template_name + (name if name.startswith(".") else "_" + name)
        for name in templates.keys()
    }
    # postprocess template parameters for Arbor
    channels = template_params["channels"]
    point_channels = template_params["point_channels"]
    banner = template_params["banner"]
    # global_mechs refer to default density mechs/params in Arbor
    # [mech -> param] (params under mech == None)
    global_mechs = Nrn2ArbMechGrouper.process_global(
        template_params["global_params"]
    )
    # local_mechs refer to locally painted density mechs/params in Arbor
    # [label -> mech -> param.name/.value] (params under mech == None)
    local_mechs, additional_global_mechs = Nrn2ArbMechGrouper.process_local(
        template_params["section_params"], channels
    )
    for mech, params in additional_global_mechs.items():
        global_mechs[mech] = global_mechs.get(mech, []) + params
    # scaled_mechs refer to iexpr params of scaled density mechs in Arbor
    # [label -> mech -> param.location/.name/.value/.value_scaler]
    range_params = {loc: [] for loc in default_location_order}
    for param in template_params["range_params"]:
        range_params[param.location].append(param)
    range_params = list(range_params.items())
    local_scaled_mechs, global_scaled_mechs = Nrn2ArbMechGrouper.process_local(
        range_params, channels
    )
    # join each mech's constant params with inhomogeneous ones on mechanisms
    _arb_append_scaled_mechs(global_mechs, global_scaled_mechs)
    for loc in local_scaled_mechs:
        _arb_append_scaled_mechs(local_mechs[loc], local_scaled_mechs[loc])
    # pprocess_mechs refer to locally placed mechs/params in Arbor
    # [label -> mech -> param.name/.value]
    pprocess_mechs, global_pprocess_mechs = Nrn2ArbMechGrouper.process_local(
        template_params["pprocess_params"], point_channels
    )
    if any(len(params) > 0 for params in global_pprocess_mechs.values()):
        raise CreateAccException(
            "Point process mechanisms cannot be" " placed globally in Arbor."
        )
    # Evaluate synapse locations
    # (no new labels introduced, but locations explicitly defined)
    pprocess_mechs = _arb_filter_point_proc_locs(pprocess_mechs)
    # NMODL formatter loads metadata of external and Arbor's built-in
    # mech catalogues
    nmodl_formatter = ArbNmodlMechFormatter(ext_catalogues)
    # translate mechs to Arbor's nomenclature
    global_mechs = nmodl_formatter.translate_density(global_mechs)
    local_mechs = {
        loc: nmodl_formatter.translate_density(mechs)
        for loc, mechs in local_mechs.items()
    }
    pprocess_mechs = {
        loc: nmodl_formatter.translate_points(mechs)
        for loc, mechs in pprocess_mechs.items()
    }
    # get iexpr parameters of scaled density mechs
    global_scaled_mechs = _arb_project_scaled_mechs(global_mechs)
    local_scaled_mechs = {
        loc: _arb_project_scaled_mechs(mechs)
        for loc, mechs in local_mechs.items()
    }
    # populate label dict
    label_dict = _arb_populate_label_dict(
        local_mechs, local_scaled_mechs, pprocess_mechs
    )
    # Render every template into its target file content
    ret = {
        filenames[name]: template.render(
            template_name=template_name,
            banner=banner,
            morphology=morphology,
            replace_axon=replace_axon_path,
            modified_morphology=modified_morphology_path,
            filenames=filenames,
            label_dict=label_dict,
            global_mechs=global_mechs,
            global_scaled_mechs=global_scaled_mechs,
            local_mechs=local_mechs,
            local_scaled_mechs=local_scaled_mechs,
            pprocess_mechs=pprocess_mechs,
            **custom_jinja_params,
        )
        for name, template in templates.items()
    }
    if replace_axon is not None:
        # Ship the axon-replacement (and optional modified) morphologies
        # alongside the rendered templates
        ret[replace_axon_path] = replace_axon_acc
        if modified_morphology_path is not None:
            ret[modified_morphology_path] = modified_morphology_acc
    return ret
def write_acc(
    output_dir,
    cell,
    parameters,
    template_filename="acc/*_template.jinja2",
    ext_catalogues=None,
    create_mod_morph=False,
    sim=None,
):
    """Output mixed JSON/ACC format for Arbor cable cell to files

    Args:
        output_dir (str): Output directory. If not exists, will be created
        cell (): Cell model to output
        parameters (): Values for mechanism parameters, etc.
        template_filename (str): file path of the cell.json , decor.acc and
        label_dict.acc jinja2 templates (with wildcards expanded by glob)
        ext_catalogues (): Name to path mapping of non-Arbor built-in
        NMODL mechanism catalogues compiled with modcc
        create_mod_morph (str): Output ACC with axon replacement
        sim (): Neuron simulator instance (only used with axon
        replacement if morphology has not yet been instantiated)

    Raises:
        CreateAccException: if the rendered output does not contain exactly
        one JSON component, or if any of the output files already exists
    """
    output = cell.create_acc(
        parameters,
        template=template_filename,
        ext_catalogues=ext_catalogues,
        create_mod_morph=create_mod_morph,
        sim=sim,
    )

    # Exactly one rendered component must be the JSON meta-information file
    cell_json = [
        comp_rendered
        for comp, comp_rendered in output.items()
        if pathlib.Path(comp).suffix == ".json"
    ]
    if len(cell_json) != 1:
        raise CreateAccException(
            "JSON file from create_acc is non-unique: %s" % cell_json
        )

    cell_json = json.loads(cell_json[0])

    output_dir = pathlib.Path(output_dir)
    if not output_dir.exists():
        # create missing intermediate directories as well
        output_dir.mkdir(parents=True)

    # Write every rendered component, refusing to overwrite existing files
    for comp, comp_rendered in output.items():
        comp_filename = output_dir.joinpath(comp)
        if comp_filename.exists():
            raise CreateAccException("%s already exists!" % comp_filename)
        with open(comp_filename, "w") as f:
            f.write(comp_rendered)

    # Copy the original morphology file next to the exported components
    morpho_filename = output_dir.joinpath(cell_json["morphology"]["original"])
    if morpho_filename.exists():
        raise CreateAccException("%s already exists!" % morpho_filename)
    shutil.copy2(cell.morphology.morphology_path, morpho_filename)
# Read the mixed JSON/ACC-output, to be moved to Arbor in future release
def read_acc(cell_json_filename):
    """Return constituents to build an Arbor cable cell from create_acc-export

    Args:
        cell_json_filename (str): The path to the JSON file containing
        meta-information on morphology, label-dict and decor of exported cell
    """
    with open(cell_json_filename) as cell_json_file:
        cell_json = json.load(cell_json_file)

    # All referenced files are resolved relative to the JSON file's directory
    cell_json_dir = pathlib.Path(cell_json_filename).parent

    morpho_filename = cell_json_dir.joinpath(
        cell_json["morphology"]["original"]
    )
    # Axon replacement is optional in the exported meta-information
    replace_axon = cell_json["morphology"].get("replace_axon", None)
    if replace_axon is not None:
        replace_axon = cell_json_dir.joinpath(replace_axon)
    morpho = ArbFileMorphology.load(morpho_filename, replace_axon)

    # Decor and label dict are stored as separate ACC components
    decor_path = cell_json_dir.joinpath(cell_json["decor"])
    label_dict_path = cell_json_dir.joinpath(cell_json["label_dict"])
    decor = arbor.load_component(decor_path).component
    labels = arbor.load_component(label_dict_path).component

    return cell_json, morpho, decor, labels
class CreateAccException(Exception):
    """Exceptions generated by create_acc module"""

    def __init__(self, message):
        """Store the message on the base Exception class."""
        super().__init__(message)
================================================
FILE: bluepyopt/ephys/create_hoc.py
================================================
'''create a hoc file from a set of BluePyOpt.ephys parameters'''
# pylint: disable=R0914
import os
import re
from collections import defaultdict, namedtuple, OrderedDict
from datetime import datetime
import jinja2
import bluepyopt
from bluepyopt.ephys.locations import (NrnSeclistCompLocation,
NrnSeclistLocation,
NrnSectionCompLocation,
NrnSomaDistanceCompLocation,
NrnSecSomaDistanceCompLocation,
NrnTrunkSomaDistanceCompLocation,
ArbLocation)
from bluepyopt.ephys.mechanisms import (Mechanism,
NrnMODMechanism,
NrnMODPointProcessMechanism)
from bluepyopt.ephys.parameters import (NrnGlobalParameter,
NrnSectionParameter,
NrnRangeParameter,
NrnPointProcessParameter,
MetaParameter)
from bluepyopt.ephys.parameterscalers import (NrnSegmentSomaDistanceScaler,
NrnSegmentLinearScaler,
FLOAT_FORMAT,
format_float)
# Intermediate records used when rendering templates:
# a point-process parameter expression (name, point location(s), value)
PointExpr = namedtuple('PointExpr', 'name, point_loc, value')
# a distance-scaled range parameter before conversion to a hoc string
RangeExpr = namedtuple('RangeExpr', 'location, name, value, value_scaler')

# Consider renaming Location as name already used in locations module
# (parameter name, formatted value) pair for a section parameter
Location = namedtuple('Location', 'name, value')
# a range parameter already rendered as a hoc value expression
Range = namedtuple('Range', 'location, param_name, value')

# Default ordering of section-list locations in the generated template
DEFAULT_LOCATION_ORDER = [
    'all',
    'apical',
    'axonal',
    'basal',
    'somatic',
    'myelinated']
def generate_channels_by_location(mechs, location_order):
    """Create a OrderedDictionary of all channel mechs for hoc template.

    Args:
        mechs (list of bluepyopt.ephys.mechanisms.Mechanism): mechanisms
        location_order (list of str): order of locations

    Returns:
        tuple of (channels, point_channels) OrderedDicts keyed by location
    """
    # Delegate to the generic helper using the Neuron location description
    return _generate_channels_by_location(mechs, location_order, _loc_desc)
def _generate_channels_by_location(mechs, location_order, loc_desc):
    """Create a OrderedDictionary of all channel mechs for hoc template."""
    # Preserve the requested location ordering in the output dictionaries
    channels = OrderedDict((loc, []) for loc in location_order)
    point_channels = OrderedDict((loc, []) for loc in location_order)

    for mech in mechs:
        name = mech.suffix
        for location in mech.locations:
            key = loc_desc(location, mech)
            # Point-process mechs are stored as objects, density mechs
            # by their suffix name
            if isinstance(mech, NrnMODPointProcessMechanism):
                point_channels[key].append(mech)
            else:
                channels[key].append(name)

    return channels, point_channels
def generate_reinitrng(mechs) -> str:
    """Create re_init_rng function"""
    # Point-process mechanisms have no HOC support yet: fail early
    if any(isinstance(mech, NrnMODPointProcessMechanism) for mech in mechs):
        raise NotImplementedError(
            'HOC generation for models with point process mechanisms'
            ' is not yet supported.')

    # Concatenate the per-mechanism RNG blocks into the HOC boilerplate
    hoc_blocks = ''.join(
        mech.generate_reinitrng_hoc_block() for mech in mechs)
    return NrnMODMechanism.hash_hoc_string + \
        NrnMODMechanism.reinitrng_hoc_string % {
            'reinitrng_hoc_blocks': hoc_blocks}
def range_exprs_to_hoc(range_params):
    """Process raw range parameters to hoc strings"""
    hoc_ranges = []
    for param in range_params:
        scaler = param.value_scaler
        # Translate the python-flavoured distribution expression to hoc
        expr = scaler.inst_distribution
        expr = re.sub(r'math\.', '', expr)
        expr = re.sub(r'\&', '&&', expr)
        expr = re.sub('{distance}', FLOAT_FORMAT, expr)
        expr = re.sub('{value}', format_float(param.value), expr)
        if hasattr(scaler, "step_begin"):
            # Step scalers carry additional begin/end placeholders
            expr = re.sub(
                '{step_begin}',
                format_float(scaler.step_begin),
                expr
            )
            expr = re.sub(
                '{step_end}', format_float(scaler.step_end), expr
            )
        hoc_ranges.append(Range(param.location, param.name, expr))
    return hoc_ranges
def _loc_desc(location, param_or_mech):
    """Generate Neuron location description for HOC template"""
    if isinstance(param_or_mech, Mechanism):
        # Only density mechs placed on section lists are exportable
        if isinstance(param_or_mech, NrnMODMechanism):
            if isinstance(location, NrnSeclistLocation):
                return location.seclist_name
            raise CreateHocException(
                "%s is currently not supported for mechs." %
                type(location).__name__)
        if isinstance(param_or_mech, NrnMODPointProcessMechanism):
            raise CreateHocException("%s is currently not supported." %
                                     type(param_or_mech).__name__)
        # Other Mechanism subclasses fall through without a description
        # (mirrors the original implicit None)
        return None

    # Parameters: compartment-level locations and point-process parameters
    # cannot be expressed in the HOC template
    unsupported_locs = (NrnSeclistCompLocation,
                        NrnSectionCompLocation,
                        NrnSomaDistanceCompLocation,
                        NrnSecSomaDistanceCompLocation,
                        NrnTrunkSomaDistanceCompLocation,
                        ArbLocation)
    if not isinstance(location, unsupported_locs) and \
            not isinstance(param_or_mech, NrnPointProcessParameter):
        return location.seclist_name
    raise CreateHocException("%s is currently not supported." %
                             type(param_or_mech).__name__)
def generate_parameters(parameters):
    """Create a list of parameters that need to be added to the hoc template

    Args:
        parameters (list of bluepyopt.Parameters): parameters in hoc template

    Returns: tuple of global, section, range, pprocess and location order
    """
    # Pass a copy: _generate_parameters appends any custom locations it
    # encounters to the list it receives, which would otherwise mutate the
    # module-level DEFAULT_LOCATION_ORDER constant across calls
    location_order = list(DEFAULT_LOCATION_ORDER)
    return _generate_parameters(parameters, location_order, _loc_desc)
def _generate_parameters(parameters, location_order, loc_desc):
    """Create a list of parameters that need to be added to the hoc template

    Args:
        parameters (): parameters to render into the template
        location_order (list of str): ordered simulator-specific locations;
        locations found on parameters but missing here are appended
        loc_desc (): maps a (location, parameter) pair to a location
        description string (or a list of such strings)

    Returns: tuple of global_params (dict), ordered (location, params)
        section pairs, range params, ordered (location, params)
        point-process pairs and the final location order

    Raises:
        CreateHocException: if a parameter declares param_dependencies
    """
    # Work on a copy: callers may pass a module-level constant
    # (e.g. DEFAULT_LOCATION_ORDER) that must not be mutated by the
    # appends below
    location_order = list(location_order)

    param_locations = defaultdict(list)
    global_params = {}
    for param in parameters:
        if isinstance(param, NrnGlobalParameter):
            global_params[param.param_name] = param.value
        elif isinstance(param, MetaParameter):
            # MetaParameters are not rendered into the template
            pass
        else:
            assert isinstance(
                param.locations, (tuple, list)), 'Must have locations list'
            for location in param.locations:
                # loc_desc may return a single description or a list of them
                locs = loc_desc(location, param)
                if not isinstance(locs, list):
                    param_locations[locs].append(param)
                else:
                    for loc in locs:
                        param_locations[loc].append(param)

    section_params = defaultdict(list)
    pprocess_params = defaultdict(list)
    range_params = []

    # Append locations not part of the given ordering at the end
    for loc in param_locations:
        if loc not in location_order:
            location_order.append(loc)

    for loc in location_order:
        if loc not in param_locations:
            continue
        for param in param_locations[loc]:
            if not isinstance(param.param_dependencies, list) or \
                    len(param.param_dependencies) > 0:
                raise CreateHocException(  # also an ACC exception
                    'Exporting models with parameters that have'
                    ' param_dependencies is not yet supported.')
            if isinstance(param, NrnRangeParameter):
                if isinstance(
                        param.value_scaler,
                        NrnSegmentSomaDistanceScaler):
                    # Distance-scaled ranges keep their scaler for later
                    # conversion (e.g. by range_exprs_to_hoc)
                    range_params.append(
                        RangeExpr(loc,
                                  param.param_name,
                                  param.value,
                                  param.value_scaler))
                elif isinstance(param.value_scaler, NrnSegmentLinearScaler):
                    value = param.value_scale_func(param.value)
                    section_params[loc].append(
                        Location(param.param_name, format_float(value)))
            elif isinstance(param, NrnSectionParameter):
                value = param.value_scale_func(param.value)
                section_params[loc].append(
                    Location(param.param_name, format_float(value)))
            elif isinstance(param, NrnPointProcessParameter):
                value = param.value
                pprocess_params[loc].append(
                    PointExpr(param.param_name, param.locations,
                              format_float(value)))

    ordered_section_params = [(loc, section_params[loc])
                              for loc in location_order]
    ordered_pprocess_params = [(loc, pprocess_params[loc])
                               for loc in location_order]

    return global_params, ordered_section_params, range_params, \
        ordered_pprocess_params, location_order
def _read_template(template_dir, template_filename):
    """Read Jinja2 hoc template to render"""
    if template_dir is None:
        # Default to the 'templates' directory next to this module
        template_dir = os.path.abspath(
            os.path.join(os.path.dirname(__file__), 'templates'))

    template_path = os.path.join(template_dir, template_filename)
    with open(template_path) as template_file:
        contents = template_file.read()
    return jinja2.Template(contents)
def _get_template_params(
        mechs,
        parameters,
        ignored_globals,
        disable_banner,
        default_location_order,
        loc_desc):
    '''return parameters to render Jinja2 templates with simulator descriptions

    Args:
        mechs (): All the mechs for the hoc template
        parameters (): All the parameters in the hoc template
        ignored_globals (iterable str): HOC code is added for each
        NrnGlobalParameter
        that exists, to test that it matches the values set in the parameters.
        This iterable contains parameter names that aren't checked
        disable_banner (): suppress the generated-by banner when truthy
        default_location_order (): list of ordered simulator-specific locations
        to use by default
        loc_desc (): method that extracts simulator-specific location
        description from pair of locations and mechanisms/parameters
    '''
    global_params, section_params, range_params, \
        pprocess_params, location_order = \
        _generate_parameters(parameters, default_location_order, loc_desc)
    channels, point_channels = _generate_channels_by_location(
        mechs, location_order, loc_desc)

    # Move ignored globals out of global_params so their values are
    # reported but not enforced by the template
    ignored_global_params = {}
    for ignored_global in ignored_globals:
        if ignored_global in global_params:
            ignored_global_params[ignored_global] = \
                global_params.pop(ignored_global)

    banner = None if disable_banner else 'Created by BluePyOpt(%s) at %s' % (
        bluepyopt.__version__, datetime.now())

    return dict(global_params=global_params,
                ignored_global_params=ignored_global_params,
                section_params=section_params,
                range_params=range_params,
                pprocess_params=pprocess_params,
                location_order=location_order,
                channels=channels,
                point_channels=point_channels,
                banner=banner)
def create_hoc(mechs,
               parameters,
               morphology=None,
               ignored_globals=(),
               replace_axon=None,
               template_name='CCell',
               template_filename='cell_template.jinja2',
               disable_banner=None,
               template_dir=None,
               custom_jinja_params=None):
    '''return a string containing the hoc template

    Args:
        mechs (): All the mechs for the hoc template
        parameters (): All the parameters in the hoc template
        morphology (str): Name of morphology
        ignored_globals (iterable str): HOC code is added for each
        NrnGlobalParameter
        that exists, to test that it matches the values set in the parameters.
        This iterable contains parameter names that aren't checked
        replace_axon (str): String replacement for the 'replace_axon' command.
        Must include 'proc replace_axon(){ ... }
        template_name (str): name of the cell template class in the hoc
        template_filename (str): file name of the jinja2 template
        disable_banner (): suppress the generated-by banner when truthy
        template_dir (str): dir name of the jinja2 template
        custom_jinja_params (dict): dict of additional jinja2 params in case
        of a custom template
    '''
    template = _read_template(template_dir, template_filename)

    # Pass a copy of DEFAULT_LOCATION_ORDER: _generate_parameters appends
    # custom locations to the list it receives and must not mutate the
    # module-level constant
    template_params = _get_template_params(mechs,
                                           parameters,
                                           ignored_globals,
                                           disable_banner,
                                           list(DEFAULT_LOCATION_ORDER),
                                           _loc_desc)

    # delete empty dicts to avoid conflict with custom_jinja_params
    del template_params['pprocess_params']
    del template_params['point_channels']

    # Convert raw range parameters into hoc-syntax expressions
    template_params['range_params'] = range_exprs_to_hoc(
        template_params['range_params']
    )

    re_init_rng = generate_reinitrng(mechs)

    if custom_jinja_params is None:
        custom_jinja_params = {}
    return template.render(template_name=template_name,
                           morphology=morphology,
                           replace_axon=replace_axon,
                           re_init_rng=re_init_rng,
                           **template_params,
                           **custom_jinja_params)
class CreateHocException(Exception):
    """All exceptions generated by create_hoc module"""

    def __init__(self, message):
        """Store the message on the base Exception class."""
        super().__init__(message)
================================================
FILE: bluepyopt/ephys/efeatures.py
================================================
"""eFeature classes"""
"""
Copyright (c) 2016-2020, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# pylint: disable=R0914
import logging
import numpy as np
from bluepyopt.ephys.base import BaseEPhys
from bluepyopt.ephys.serializer import DictMixin
from .extra_features_utils import *
# Module-level logger for efeatures
logger = logging.getLogger(__name__)
def masked_cosine_distance(exp, model):
    """Cosine distance between exp and model over entries that are finite
    in both arrays, scaled by the fraction of finite experimental values."""
    from scipy.spatial import distance

    finite_exp = np.isfinite(exp)
    finite_model = np.isfinite(model)
    # Only compare positions where both arrays carry finite values
    both_finite = finite_exp & finite_model

    raw_score = distance.cosine(exp[both_finite], model[both_finite])
    # Penalize model entries missing where experimental data exists
    return raw_score * (sum(finite_exp) / len(both_finite))
class EFeature(BaseEPhys):
    """EPhys feature

    Marker base class for ephys features (extends BaseEPhys)."""
    pass
class eFELFeature(EFeature, DictMixin):
    """eFEL feature"""

    # Fields included when serializing this feature via DictMixin
    SERIALIZED_FIELDS = ('name', 'efel_feature_name', 'recording_names',
                         'stim_start', 'stim_end', 'exp_mean',
                         'exp_std', 'threshold', 'comment')
def __init__(
        self,
        name,
        efel_feature_name=None,
        recording_names=None,
        stim_start=None,
        stim_end=None,
        exp_mean=None,
        exp_std=None,
        threshold=None,
        stimulus_current=None,
        comment='',
        interp_step=None,
        double_settings=None,
        int_settings=None,
        string_settings=None,
        force_max_score=False,
        max_score=250
):
    """Constructor

    Args:
        name (str): name of the eFELFeature object
        efel_feature_name (str): name of the eFeature in the eFEL library
            (ex: 'AP1_peak')
        recording_names (dict): eFEL features can accept several recordings
            as input
        stim_start (float): stimulation start time (ms)
        stim_end (float): stimulation end time (ms)
        exp_mean (float): experimental mean of this eFeature
        exp_std(float): experimental standard deviation of this eFeature
        threshold(float): spike detection threshold (mV)
        stimulus_current (float): amplitude of the applied stimulus
            -- presumably in nA; TODO confirm against callers
        comment (str): comment
        interp_step(float): interpolation step (ms)
        double_settings(dict): dictionary with efel double settings that
            should be set before extracting the features
        int_settings(dict): dictionary with efel int settings that
            should be set before extracting the features
        string_settings(dict): dictionary with efel string settings that
            should be set before extracting the features
        force_max_score (bool): NOTE(review): presumably caps computed
            scores at max_score -- confirm against score calculation
        max_score (float): score ceiling used together with
            force_max_score (default 250)
    """
    super(eFELFeature, self).__init__(name, comment)

    self.recording_names = recording_names
    self.efel_feature_name = efel_feature_name
    self.exp_mean = exp_mean
    self.exp_std = exp_std
    self.stim_start = stim_start
    self.stim_end = stim_end
    self.threshold = threshold
    self.interp_step = interp_step
    self.stimulus_current = stimulus_current
    self.double_settings = double_settings
    self.int_settings = int_settings
    self.string_settings = string_settings
    self.force_max_score = force_max_score
    self.max_score = max_score
def _construct_efel_trace(self, responses):
"""Construct trace that can be passed to eFEL"""
trace = {}
if '' not in self.recording_names:
raise Exception(
'eFELFeature: \'\' needs to be in recording_names')
for location_name, recording_name in self.record
gitextract_wgjz66lh/ ├── .coveragerc ├── .gitattributes ├── .github/ │ └── workflows/ │ ├── build.yml │ ├── keep-alive.yml │ ├── mirror-ebrains.yml │ └── test.yml ├── .gitignore ├── .readthedocs.yaml ├── .zenodo.json ├── AUTHORS.txt ├── COPYING ├── COPYING.lesser ├── Dockerfile ├── LICENSE.txt ├── MANIFEST.in ├── Makefile ├── README.rst ├── bluepyopt/ │ ├── __init__.py │ ├── api.py │ ├── deapext/ │ │ ├── CMA_MO.py │ │ ├── CMA_SO.py │ │ ├── __init__.py │ │ ├── algorithms.py │ │ ├── hype.py │ │ ├── optimisations.py │ │ ├── optimisationsCMA.py │ │ ├── stoppingCriteria.py │ │ ├── tools/ │ │ │ ├── __init__.py │ │ │ └── selIBEA.py │ │ └── utils.py │ ├── ephys/ │ │ ├── __init__.py │ │ ├── acc.py │ │ ├── base.py │ │ ├── create_acc.py │ │ ├── create_hoc.py │ │ ├── efeatures.py │ │ ├── evaluators.py │ │ ├── examples/ │ │ │ ├── __init__.py │ │ │ └── simplecell/ │ │ │ ├── __init__.py │ │ │ ├── simple.swc │ │ │ └── simplecell.py │ │ ├── extra_features_utils.py │ │ ├── locations.py │ │ ├── mechanisms.py │ │ ├── models.py │ │ ├── morphologies.py │ │ ├── objectives.py │ │ ├── objectivescalculators.py │ │ ├── parameters.py │ │ ├── parameterscalers/ │ │ │ ├── __init__.py │ │ │ ├── acc_iexpr.py │ │ │ └── parameterscalers.py │ │ ├── protocols.py │ │ ├── recordings.py │ │ ├── responses.py │ │ ├── serializer.py │ │ ├── simulators.py │ │ ├── static/ │ │ │ └── arbor_mechanisms.json │ │ ├── stimuli.py │ │ └── templates/ │ │ ├── acc/ │ │ │ ├── _json_template.jinja2 │ │ │ ├── decor_acc_template.jinja2 │ │ │ └── label_dict_acc_template.jinja2 │ │ └── cell_template.jinja2 │ ├── evaluators.py │ ├── ipyp/ │ │ ├── __init__.py │ │ └── bpopt_tasksdb.py │ ├── neuroml/ │ │ ├── NeuroML2_mechanisms/ │ │ │ ├── Ca.channel.nml │ │ │ ├── Ca_HVA.channel.nml │ │ │ ├── Ca_LVAst.channel.nml │ │ │ ├── Ih.channel.nml │ │ │ ├── Im.channel.nml │ │ │ ├── K_Pst.channel.nml │ │ │ ├── K_Tst.channel.nml │ │ │ ├── KdShu2007.channel.nml │ │ │ ├── NaTa_t.channel.nml │ │ │ ├── NaTs2_t.channel.nml │ │ │ ├── 
Nap_Et2.channel.nml │ │ │ ├── SK_E2.channel.nml │ │ │ ├── SKv3_1.channel.nml │ │ │ ├── StochKv_deterministic.channel.nml │ │ │ ├── baseCaDynamics_E2_NML2.nml │ │ │ └── pas.channel.nml │ │ ├── __init__.py │ │ ├── biophys.py │ │ ├── cell.py │ │ ├── morphology.py │ │ └── simulation.py │ ├── objectives.py │ ├── optimisations.py │ ├── parameters.py │ ├── stoppingCriteria.py │ ├── tests/ │ │ ├── .gitignore │ │ ├── __init__.py │ │ ├── disable_simplecell_scoop.py │ │ ├── expected_results.json │ │ ├── test_bluepyopt.py │ │ ├── test_deapext/ │ │ │ ├── __init__.py │ │ │ ├── deapext_test_utils.py │ │ │ ├── test_algorithms.py │ │ │ ├── test_hype.py │ │ │ ├── test_optimisations.py │ │ │ ├── test_optimisationsCMA.py │ │ │ ├── test_selIBEA.py │ │ │ ├── test_stoppingCriteria.py │ │ │ └── test_utils.py │ │ ├── test_ephys/ │ │ │ ├── __init__.py │ │ │ ├── test_acc.py │ │ │ ├── test_create_acc.py │ │ │ ├── test_create_hoc.py │ │ │ ├── test_evaluators.py │ │ │ ├── test_extra_features_utils.py │ │ │ ├── test_features.py │ │ │ ├── test_init.py │ │ │ ├── test_locations.py │ │ │ ├── test_mechanisms.py │ │ │ ├── test_models.py │ │ │ ├── test_morphologies.py │ │ │ ├── test_objectives.py │ │ │ ├── test_parameters.py │ │ │ ├── test_parameterscalers.py │ │ │ ├── test_protocols.py │ │ │ ├── test_recordings.py │ │ │ ├── test_serializer.py │ │ │ ├── test_simulators.py │ │ │ ├── test_stimuli.py │ │ │ ├── testdata/ │ │ │ │ ├── TimeVoltageResponse.csv │ │ │ │ ├── acc/ │ │ │ │ │ ├── CCell/ │ │ │ │ │ │ ├── CCell.json │ │ │ │ │ │ ├── CCell_decor.acc │ │ │ │ │ │ ├── CCell_label_dict.acc │ │ │ │ │ │ └── simple_axon_replacement.acc │ │ │ │ │ ├── expsyn/ │ │ │ │ │ │ ├── simple.swc │ │ │ │ │ │ ├── simple_cell.json │ │ │ │ │ │ ├── simple_cell_decor.acc │ │ │ │ │ │ └── simple_cell_label_dict.acc │ │ │ │ │ ├── l5pc/ │ │ │ │ │ │ ├── C060114A7.asc │ │ │ │ │ │ ├── C060114A7_axon_replacement.acc │ │ │ │ │ │ ├── C060114A7_modified.acc │ │ │ │ │ │ ├── l5pc.json │ │ │ │ │ │ ├── l5pc_decor.acc │ │ │ │ │ │ └── 
l5pc_label_dict.acc │ │ │ │ │ ├── l5pc_py37/ │ │ │ │ │ │ └── l5pc_decor.acc │ │ │ │ │ ├── simplecell/ │ │ │ │ │ │ ├── simple.swc │ │ │ │ │ │ ├── simple_axon_replacement.acc │ │ │ │ │ │ ├── simple_cell.json │ │ │ │ │ │ ├── simple_cell_decor.acc │ │ │ │ │ │ ├── simple_cell_label_dict.acc │ │ │ │ │ │ └── simple_modified.acc │ │ │ │ │ └── templates/ │ │ │ │ │ ├── cell_json_template.jinja2 │ │ │ │ │ ├── decor_acc_template.jinja2 │ │ │ │ │ └── label_dict_acc_template.jinja2 │ │ │ │ ├── apic.swc │ │ │ │ ├── lfpy_soma_time.npy │ │ │ │ ├── lfpy_soma_voltage.npy │ │ │ │ ├── lfpy_time.npy │ │ │ │ ├── lfpy_voltage.npy │ │ │ │ ├── simple.swc │ │ │ │ ├── simple.wrong │ │ │ │ ├── simple_ax1.swc │ │ │ │ ├── simple_ax2.asc │ │ │ │ ├── simple_ax2.swc │ │ │ │ └── test.jinja2 │ │ │ ├── testmodels/ │ │ │ │ ├── __init__.py │ │ │ │ └── dummycells.py │ │ │ └── utils.py │ │ ├── test_evaluators.py │ │ ├── test_l5pc.py │ │ ├── test_lfpy.py │ │ ├── test_neuroml_fcts.py │ │ ├── test_parameters.py │ │ ├── test_simplecell.py │ │ ├── test_stochkv.py │ │ ├── test_tools.py │ │ └── testdata/ │ │ └── l5pc_validate_neuron_arbor/ │ │ └── param_values.json │ └── tools.py ├── cloud-config/ │ ├── README.md │ ├── config/ │ │ ├── amazon/ │ │ │ ├── README.md │ │ │ ├── ansible.cfg │ │ │ ├── create_instance.yaml │ │ │ ├── gather_config.py │ │ │ ├── site.yaml │ │ │ └── vars.yaml │ │ ├── cluster-user/ │ │ │ ├── README.md │ │ │ ├── ansible.cfg │ │ │ ├── hosts │ │ │ ├── site.yaml │ │ │ └── vars.yaml │ │ └── vagrant/ │ │ ├── README.md │ │ ├── Vagrantfile │ │ ├── ansible.cfg │ │ ├── hosts │ │ ├── site.yaml │ │ └── vars.yaml │ └── roles/ │ ├── base/ │ │ └── tasks/ │ │ └── main.yaml │ ├── deap/ │ │ └── tasks/ │ │ └── main.yaml │ ├── granule-example/ │ │ └── tasks/ │ │ └── main.yaml │ ├── neuron/ │ │ └── tasks/ │ │ ├── main.yaml │ │ └── python27.yaml │ └── scoop-master/ │ └── tasks/ │ └── main.yaml ├── codecov.yml ├── docs/ │ ├── .gitignore │ ├── Makefile │ └── source/ │ ├── .gitignore │ ├── _templates/ │ │ └── 
module.rst │ ├── api.rst │ ├── conf.py │ ├── deapext.rst │ ├── ephys.rst │ ├── index.rst │ └── optimisations.rst ├── examples/ │ ├── BluePyOpt-ipyparallel.md │ ├── README.md │ ├── __init__.py │ ├── cma_strategy/ │ │ └── cma.ipynb │ ├── expsyn/ │ │ ├── .gitignore │ │ ├── ExpSyn.ipynb │ │ ├── ExpSyn_arbor.ipynb │ │ ├── expsyn.py │ │ ├── generate_acc.py │ │ └── simple.swc │ ├── graupnerbrunelstdp/ │ │ ├── checkpoints/ │ │ │ └── .gitignore │ │ ├── figures/ │ │ │ └── .gitignore │ │ ├── gbevaluator.py │ │ ├── graupnerbrunelstdp.ipynb │ │ ├── run_fit.py │ │ ├── stdputil.py │ │ └── test_stdputil.py │ ├── l5pc/ │ │ ├── .gitignore │ │ ├── L5PC.ipynb │ │ ├── L5PC_arbor.ipynb │ │ ├── benchmark/ │ │ │ ├── get_stats.py │ │ │ ├── l5pc_benchmark.sbatch │ │ │ ├── logs/ │ │ │ │ └── .gitignore │ │ │ ├── run_benchmark.sh │ │ │ ├── start.sh │ │ │ └── task_stats.py │ │ ├── cADpyr_76.hoc │ │ ├── checkpoints/ │ │ │ └── .gitignore │ │ ├── config/ │ │ │ ├── features.json │ │ │ ├── fixed_params.json │ │ │ ├── mechanisms.json │ │ │ ├── parameters.json │ │ │ ├── params.json │ │ │ └── protocols.json │ │ ├── convert_noise_exp.py │ │ ├── convert_params.py │ │ ├── create_tables.py │ │ ├── exp_data/ │ │ │ ├── .gitignore │ │ │ └── noise_i.txt │ │ ├── figures/ │ │ │ └── .gitignore │ │ ├── generate_acc.py │ │ ├── generate_hoc.py │ │ ├── hocmodel.py │ │ ├── l5pc_analysis.py │ │ ├── l5pc_evaluator.py │ │ ├── l5pc_model.py │ │ ├── l5pc_validate_neuron_arbor.ipynb │ │ ├── l5pc_validate_neuron_arbor_pm.py │ │ ├── mechanisms/ │ │ │ ├── CaDynamics_E2.mod │ │ │ ├── Ca_HVA.mod │ │ │ ├── Ca_LVAst.mod │ │ │ ├── Ih.mod │ │ │ ├── Im.mod │ │ │ ├── K_Pst.mod │ │ │ ├── K_Tst.mod │ │ │ ├── LICENSE │ │ │ ├── NaTa_t.mod │ │ │ ├── NaTs2_t.mod │ │ │ ├── Nap_Et2.mod │ │ │ ├── SK_E2.mod │ │ │ ├── SKv3_1.mod │ │ │ └── dummy.inc │ │ ├── morphology/ │ │ │ ├── C060114A7.asc │ │ │ └── LICENSE │ │ ├── nsg/ │ │ │ ├── .gitignore │ │ │ ├── Makefile │ │ │ └── init.py │ │ ├── opt_l5pc.py │ │ ├── opt_l5pc.sh │ │ ├── tables/ │ │ │ └── 
.gitignore │ │ └── tasks2dataframe.py │ ├── l5pc_lfpy/ │ │ ├── L5PC_LFPy.ipynb │ │ ├── __init__.py │ │ ├── extra_features.json │ │ ├── generate_extra_features.py │ │ ├── l5pc_lfpy_evaluator.py │ │ └── l5pc_lfpy_model.py │ ├── metaparameters/ │ │ ├── .gitignore │ │ ├── metaparameters.ipynb │ │ └── twocompartment.swc │ ├── neuroml/ │ │ └── neuroml.ipynb │ ├── simplecell/ │ │ ├── .gitignore │ │ ├── checkpoints/ │ │ │ └── .gitignore │ │ ├── figures/ │ │ │ └── .gitignore │ │ ├── generate_acc.py │ │ ├── generate_hoc.py │ │ ├── responses.pkl │ │ ├── simple.swc │ │ ├── simplecell-paperfig.ipynb │ │ ├── simplecell.ipynb │ │ ├── simplecell_arbor.ipynb │ │ └── simplecell_model.py │ ├── stochkv/ │ │ ├── .gitignore │ │ ├── mechanisms/ │ │ │ ├── StochKv.mod │ │ │ ├── StochKv3.mod │ │ │ └── dummy.inc │ │ ├── morphology/ │ │ │ └── simple.swc │ │ ├── stochkv3cell.hoc │ │ ├── stochkv3cell.py │ │ ├── stochkv3cell_det.hoc │ │ ├── stochkvcell.hoc │ │ ├── stochkvcell.py │ │ └── stochkvcell_det.hoc │ ├── thalamocortical-cell/ │ │ ├── CellEvalSetup/ │ │ │ ├── __init__.py │ │ │ ├── evaluator.py │ │ │ ├── protocols.py │ │ │ ├── template.py │ │ │ └── tools.py │ │ ├── LICENSE.txt │ │ ├── checkpoints/ │ │ │ └── checkpoint.pkl │ │ ├── config/ │ │ │ ├── features/ │ │ │ │ ├── cAD_ltb.json │ │ │ │ └── cNAD_ltb.json │ │ │ ├── params/ │ │ │ │ └── TC.json │ │ │ ├── protocols/ │ │ │ │ ├── cAD_ltb.json │ │ │ │ └── cNAD_ltb.json │ │ │ └── recipes.json │ │ ├── mechanisms/ │ │ │ ├── SK_E2.mod │ │ │ ├── TC_HH.mod │ │ │ ├── TC_ITGHK_Des98.mod │ │ │ ├── TC_Ih_Bud97.mod │ │ │ ├── TC_Nap_Et2.mod │ │ │ ├── TC_cadecay.mod │ │ │ ├── TC_iA.mod │ │ │ └── TC_iL.mod │ │ ├── morphologies/ │ │ │ ├── jy160728_A_idA.asc │ │ │ └── jy170517_A_idA.asc │ │ ├── results/ │ │ │ ├── cAD_ltb_params.csv │ │ │ └── cNAD_ltb_params.csv │ │ └── thalamocortical-cell_opt.ipynb │ └── tsodyksmarkramstp/ │ ├── AUTHORS.txt │ ├── README.md │ ├── amps.pkl │ ├── tmevaluator.py │ ├── tmevaluator_multiplefreqs.py │ ├── tmodeint.py │ ├── 
tmodesolve.py │ ├── trace.pkl │ ├── tsodyksmarkramstp.ipynb │ └── tsodyksmarkramstp_multiplefreqs.ipynb ├── misc/ │ ├── github_wiki/ │ │ ├── bibtex/ │ │ │ ├── mentions_BPO.bib │ │ │ ├── mentions_BPO_extra.bib │ │ │ ├── poster_uses_BPO.bib │ │ │ ├── thesis_mentions_BPO.bib │ │ │ ├── thesis_uses_BPO.bib │ │ │ ├── uses_BPO.bib │ │ │ └── uses_BPO_extra.bib │ │ └── creates_publication_list_markdown.py │ └── pytest_migration/ │ └── convert_pytest.sh ├── package.json ├── pyproject.toml ├── pytest.ini ├── requirements.txt ├── requirements_docs.txt └── tox.ini
SYMBOL INDEX (1034 symbols across 118 files)
FILE: bluepyopt/deapext/CMA_MO.py
function get_hyped (line 40) | def get_hyped(pop, ubound_score=250., threshold_improvement=240.):
class CMA_MO (line 79) | class CMA_MO(cma.StrategyMultiObjective):
method __init__ (line 82) | def __init__(
method _select (line 177) | def _select(self, candidates):
method get_population (line 211) | def get_population(self, to_space):
method get_parents (line 219) | def get_parents(self, to_space):
method generate_new_pop (line 227) | def generate_new_pop(self, lbounds, ubounds):
method update_strategy (line 232) | def update_strategy(self):
method set_fitness (line 235) | def set_fitness(self, fitnesses):
method set_fitness_parents (line 239) | def set_fitness_parents(self, fitnesses):
method check_termination (line 243) | def check_termination(self, gen):
FILE: bluepyopt/deapext/CMA_SO.py
class CMA_SO (line 49) | class CMA_SO(cma.Strategy):
method __init__ (line 52) | def __init__(
method update (line 127) | def update(self, population):
method get_population (line 191) | def get_population(self, to_space):
method generate_new_pop (line 199) | def generate_new_pop(self, lbounds, ubounds):
method update_strategy (line 204) | def update_strategy(self):
method set_fitness (line 207) | def set_fitness(self, fitnesses):
method check_termination (line 211) | def check_termination(self, gen):
FILE: bluepyopt/deapext/algorithms.py
function _define_fitness (line 41) | def _define_fitness(pop, obj_size):
function _evaluate_invalid_fitness (line 55) | def _evaluate_invalid_fitness(toolbox, population):
function _get_offspring (line 68) | def _get_offspring(parents, toolbox, cxpb, mutpb):
function _check_stopping_criteria (line 75) | def _check_stopping_criteria(criteria, params):
function eaAlphaMuPlusLambdaCheckpoint (line 86) | def eaAlphaMuPlusLambdaCheckpoint(
FILE: bluepyopt/deapext/hype.py
function hypesub (line 22) | def hypesub(la, A, actDim, bounds, pvec, alpha, k):
function hypeIndicatorExact (line 50) | def hypeIndicatorExact(points, bounds, k):
function hypeIndicatorSampled (line 77) | def hypeIndicatorSampled(points, bounds, k, nrOfSamples):
FILE: bluepyopt/deapext/optimisations.py
class WeightedSumFitness (line 47) | class WeightedSumFitness(deap.base.Fitness):
method __init__ (line 51) | def __init__(self, values=(), obj_size=None):
method weighted_sum (line 57) | def weighted_sum(self):
method sum (line 62) | def sum(self):
method __le__ (line 66) | def __le__(self, other):
method __lt__ (line 69) | def __lt__(self, other):
method __deepcopy__ (line 72) | def __deepcopy__(self, _):
class WSListIndividual (line 81) | class WSListIndividual(list):
method __init__ (line 85) | def __init__(self, *args, **kwargs):
class DEAPOptimisation (line 92) | class DEAPOptimisation(bluepyopt.optimisations.Optimisation):
method __init__ (line 96) | def __init__(self, evaluator=None,
method setup_deap (line 148) | def setup_deap(self):
method run (line 253) | def run(self,
class IBEADEAPOptimisation (line 331) | class IBEADEAPOptimisation(DEAPOptimisation):
method __init__ (line 335) | def __init__(self, *args, **kwargs):
FILE: bluepyopt/deapext/optimisationsCMA.py
function _ind_convert_space (line 42) | def _ind_convert_space(ind, convert_fcn):
class DEAPOptimisationCMA (line 48) | class DEAPOptimisationCMA(bluepyopt.optimisations.Optimisation):
method __init__ (line 52) | def __init__(
method setup_deap (line 178) | def setup_deap(self):
method run (line 243) | def run(
method get_stats (line 386) | def get_stats(self):
FILE: bluepyopt/deapext/stoppingCriteria.py
function isclose (line 34) | def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
class MaxNGen (line 38) | class MaxNGen(bluepyopt.stoppingCriteria.StoppingCriteria):
method __init__ (line 43) | def __init__(self, max_ngen):
method check (line 48) | def check(self, kwargs):
class Stagnation (line 55) | class Stagnation(bluepyopt.stoppingCriteria.StoppingCriteria):
method __init__ (line 60) | def __init__(self, lambda_, problem_size):
method check (line 71) | def check(self, kwargs):
class Stagnationv2 (line 100) | class Stagnationv2(bluepyopt.stoppingCriteria.StoppingCriteria):
method __init__ (line 105) | def __init__(
method check (line 130) | def check(self, kwargs):
class TolHistFun (line 160) | class TolHistFun(bluepyopt.stoppingCriteria.StoppingCriteria):
method __init__ (line 165) | def __init__(self, lambda_, problem_size):
method check (line 172) | def check(self, kwargs):
class EqualFunVals (line 185) | class EqualFunVals(bluepyopt.stoppingCriteria.StoppingCriteria):
method __init__ (line 190) | def __init__(self, lambda_, problem_size):
method check (line 198) | def check(self, kwargs):
class TolX (line 219) | class TolX(bluepyopt.stoppingCriteria.StoppingCriteria):
method __init__ (line 224) | def __init__(self):
method check (line 229) | def check(self, kwargs):
class TolUpSigma (line 239) | class TolUpSigma(bluepyopt.stoppingCriteria.StoppingCriteria):
method __init__ (line 244) | def __init__(self, sigma0):
method check (line 250) | def check(self, kwargs):
class ConditionCov (line 259) | class ConditionCov(bluepyopt.stoppingCriteria.StoppingCriteria):
method __init__ (line 264) | def __init__(self):
method check (line 269) | def check(self, kwargs):
class NoEffectAxis (line 278) | class NoEffectAxis(bluepyopt.stoppingCriteria.StoppingCriteria):
method __init__ (line 283) | def __init__(self, problem_size):
method check (line 289) | def check(self, kwargs):
class NoEffectCoor (line 307) | class NoEffectCoor(bluepyopt.stoppingCriteria.StoppingCriteria):
method __init__ (line 312) | def __init__(self):
method check (line 316) | def check(self, kwargs):
FILE: bluepyopt/deapext/tools/selIBEA.py
function selIBEA (line 34) | def selIBEA(population, mu, alpha=None, kappa=.05, tournament_n=4):
function _calc_fitness_components (line 55) | def _calc_fitness_components(population, kappa):
function _calc_fitnesses (line 93) | def _calc_fitnesses(population, components):
function _choice (line 104) | def _choice(seq):
function _mating_selection (line 110) | def _mating_selection(population, mu, tournament_n):
function _environmental_selection (line 126) | def _environmental_selection(population, selection_size):
FILE: bluepyopt/deapext/utils.py
class WeightedReducedFitness (line 30) | class WeightedReducedFitness(deap.base.Fitness):
method __init__ (line 34) | def __init__(self, values=(), obj_size=None, reduce_fcn=numpy.sum):
method reduce (line 41) | def reduce(self):
method weighted_reduce (line 46) | def weighted_reduce(self):
method __le__ (line 50) | def __le__(self, other):
method __lt__ (line 53) | def __lt__(self, other):
method __deepcopy__ (line 56) | def __deepcopy__(self, _):
class WSListIndividual (line 65) | class WSListIndividual(list):
method __init__ (line 69) | def __init__(self, *args, **kwargs):
function update_history_and_hof (line 86) | def update_history_and_hof(halloffame, history, population):
function record_stats (line 96) | def record_stats(stats, logbook, gen, population, invalid_count):
function closest_feasible (line 102) | def closest_feasible(individual, lbounds, ubounds):
function bound (line 113) | def bound(population, lbounds, ubounds):
function uniform (line 125) | def uniform(lower_list, upper_list, dimensions):
function reduce_method (line 137) | def reduce_method(meth):
function run_next_gen (line 142) | def run_next_gen(criteria, terminator):
FILE: bluepyopt/ephys/acc.py
class arbor (line 6) | class arbor:
method __getattribute__ (line 7) | def __getattribute__(self, _):
class ArbLabel (line 15) | class ArbLabel:
method __init__ (line 18) | def __init__(self, type, name, s_expr):
method defn (line 26) | def defn(self):
method ref (line 31) | def ref(self):
method name (line 36) | def name(self):
method loc (line 41) | def loc(self):
method __eq__ (line 45) | def __eq__(self, other):
method __hash__ (line 53) | def __hash__(self):
method __repr__ (line 56) | def __repr__(self):
FILE: bluepyopt/ephys/base.py
class BaseEPhys (line 4) | class BaseEPhys(object):
method __init__ (line 7) | def __init__(self, name='', comment=''):
method __str__ (line 11) | def __str__(self):
FILE: bluepyopt/ephys/create_acc.py
class ArbVar (line 31) | class ArbVar:
method __init__ (line 34) | def __init__(self, name, conv=None):
method __repr__ (line 45) | def __repr__(self):
class Nrn2ArbParamAdapter (line 49) | class Nrn2ArbParamAdapter:
method _param_name (line 75) | def _param_name(cls, name):
method _param_value (line 84) | def _param_value(cls, param):
method _conv_param (line 106) | def _conv_param(cls, param, name):
method format (line 137) | def format(cls, param, mechs):
class Nrn2ArbMechGrouper (line 179) | class Nrn2ArbMechGrouper:
method _is_global_property (line 183) | def _is_global_property(loc, param):
method _separate_global_properties (line 208) | def _separate_global_properties(cls, loc, mechs):
method _format_params_and_group_by_mech (line 240) | def _format_params_and_group_by_mech(params, channels):
method process_global (line 263) | def process_global(cls, params):
method process_local (line 283) | def process_local(cls, params, channels):
function _arb_filter_point_proc_locs (line 319) | def _arb_filter_point_proc_locs(pprocess_mechs):
function _arb_append_scaled_mechs (line 342) | def _arb_append_scaled_mechs(mechs, scaled_mechs):
class ArbNmodlMechFormatter (line 364) | class ArbNmodlMechFormatter:
method __init__ (line 367) | def __init__(self, ext_catalogues):
method _load_catalogue_meta (line 377) | def _load_catalogue_meta(cat_dir):
method _load_mech_catalogue_meta (line 430) | def _load_mech_catalogue_meta(cls, ext_catalogues):
method _mech_name (line 469) | def _mech_name(name):
method _translate_mech (line 481) | def _translate_mech(cls, mech_name, mech_params, arb_cats):
method translate_density (line 551) | def translate_density(self, mechs):
method translate_points (line 560) | def translate_points(self, mechs):
function _arb_project_scaled_mechs (line 573) | def _arb_project_scaled_mechs(mechs):
function _arb_populate_label_dict (line 583) | def _arb_populate_label_dict(local_mechs, local_scaled_mechs, pprocess_m...
function _read_templates (line 616) | def _read_templates(template_dir, template_filename):
function _arb_loc_desc (line 647) | def _arb_loc_desc(location, param_or_mech):
function create_acc (line 652) | def create_acc(
function write_acc (line 855) | def write_acc(
function read_acc (line 915) | def read_acc(cell_json_filename):
class CreateAccException (line 946) | class CreateAccException(Exception):
method __init__ (line 949) | def __init__(self, message):
FILE: bluepyopt/ephys/create_hoc.py
function generate_channels_by_location (line 52) | def generate_channels_by_location(mechs, location_order):
function _generate_channels_by_location (line 65) | def _generate_channels_by_location(mechs, location_order, loc_desc):
function generate_reinitrng (line 79) | def generate_reinitrng(mechs) -> str:
function range_exprs_to_hoc (line 101) | def range_exprs_to_hoc(range_params):
function _loc_desc (line 124) | def _loc_desc(location, param_or_mech):
function generate_parameters (line 151) | def generate_parameters(parameters):
function _generate_parameters (line 164) | def _generate_parameters(parameters, location_order, loc_desc):
function _read_template (line 234) | def _read_template(template_dir, template_filename):
function _get_template_params (line 249) | def _get_template_params(
function create_hoc (line 302) | def create_hoc(mechs,
class CreateHocException (line 359) | class CreateHocException(Exception):
method __init__ (line 363) | def __init__(self, message):
FILE: bluepyopt/ephys/efeatures.py
function masked_cosine_distance (line 34) | def masked_cosine_distance(exp, model):
class EFeature (line 50) | class EFeature(BaseEPhys):
class eFELFeature (line 56) | class eFELFeature(EFeature, DictMixin):
method __init__ (line 64) | def __init__(
method _construct_efel_trace (line 123) | def _construct_efel_trace(self, responses):
method _setup_efel (line 154) | def _setup_efel(self):
method calculate_feature (line 181) | def calculate_feature(self, responses, raise_warnings=False):
method calculate_score (line 207) | def calculate_score(self, responses, trace_check=False):
method __str__ (line 235) | def __str__(self):
class extraFELFeature (line 249) | class extraFELFeature(EFeature, DictMixin):
method __init__ (line 257) | def __init__(
method _construct_somatic_efel_trace (line 355) | def _construct_somatic_efel_trace(self, responses):
method _setup_efel (line 379) | def _setup_efel(self):
method _get_peak_times (line 400) | def _get_peak_times(self, responses, raise_warnings=False):
method calculate_feature (line 420) | def calculate_feature(
method calculate_score (line 494) | def calculate_score(self, responses, trace_check=False):
method __str__ (line 535) | def __str__(self):
function _interpolate_response (line 550) | def _interpolate_response(response, fs=20.0):
function _filter_response (line 566) | def _filter_response(response, fcut=[0.5, 6000], order=2, filt_type="lfi...
function _get_waveforms (line 602) | def _get_waveforms(response, peak_times, snippet_len_ms):
FILE: bluepyopt/ephys/evaluators.py
class CellEvaluator (line 32) | class CellEvaluator(bpopt.evaluators.Evaluator):
method __init__ (line 36) | def __init__(
method param_dict (line 90) | def param_dict(self, param_array):
method objective_dict (line 99) | def objective_dict(self, objective_array):
method objective_list (line 116) | def objective_list(self, objective_dict):
method seed_from_param_dict (line 127) | def seed_from_param_dict(param_dict):
method run_protocol (line 138) | def run_protocol(
method run_protocols (line 171) | def run_protocols(self, protocols, param_values):
method evaluate_with_dicts (line 185) | def evaluate_with_dicts(self, param_dict=None, target='scores'):
method evaluate_with_lists (line 208) | def evaluate_with_lists(self, param_list=None, target='scores'):
method init_simulator_and_evaluate_with_lists (line 219) | def init_simulator_and_evaluate_with_lists(
method evaluate (line 230) | def evaluate(self, param_list=None, target='scores'):
method __str__ (line 235) | def __str__(self):
FILE: bluepyopt/ephys/examples/simplecell/simplecell.py
class SimpleCell (line 8) | class SimpleCell:
method __init__ (line 9) | def __init__(self):
FILE: bluepyopt/ephys/extra_features_utils.py
function calculate_features (line 39) | def calculate_features(
function peak_to_valley (line 133) | def peak_to_valley(waveforms, sampling_frequency):
function peak_trough_ratio (line 157) | def peak_trough_ratio(waveforms):
function halfwidth (line 187) | def halfwidth(waveforms, sampling_frequency, return_idx=False):
function repolarization_slope (line 270) | def repolarization_slope(waveforms, sampling_frequency, return_idx=False):
function recovery_slope (line 332) | def recovery_slope(waveforms, sampling_frequency, window):
function peak_image (line 380) | def peak_image(waveforms, sign="negative"):
function relative_amplitude (line 414) | def relative_amplitude(waveforms, sign="negative"):
function peak_time_diff (line 446) | def peak_time_diff(waveforms, fs, sign="negative"):
function _get_slope (line 479) | def _get_slope(x, y):
function _get_trough_and_peak_idx (line 489) | def _get_trough_and_peak_idx(waveform, after_max_trough=False):
function _upsample_wf (line 515) | def _upsample_wf(waveforms, upsample):
FILE: bluepyopt/ephys/locations.py
class Location (line 37) | class Location(BaseEPhys):
function _nth_isectionlist (line 48) | def _nth_isectionlist(isectionlist, index):
class NrnSeclistCompLocation (line 61) | class NrnSeclistCompLocation(Location, DictMixin):
method __init__ (line 73) | def __init__(
method instantiate (line 94) | def instantiate(self, sim=None, icell=None): # pylint: disable=W0613
method acc_label (line 113) | def acc_label(self):
method __str__ (line 123) | def __str__(self):
class NrnSectionCompLocation (line 129) | class NrnSectionCompLocation(Location, DictMixin):
method __init__ (line 141) | def __init__(
method instantiate (line 159) | def instantiate(self, sim=None, icell=None): # pylint: disable=W0613
method acc_label (line 169) | def acc_label(self):
method __str__ (line 179) | def __str__(self):
class NrnPointProcessLocation (line 183) | class NrnPointProcessLocation(Location):
method __init__ (line 187) | def __init__(
method instantiate (line 202) | def instantiate(self, sim=None, icell=None): # pylint: disable=W0613
method acc_label (line 207) | def acc_label(self):
method __str__ (line 211) | def __str__(self):
class NrnSeclistLocation (line 217) | class NrnSeclistLocation(Location, DictMixin):
method __init__ (line 223) | def __init__(
method instantiate (line 238) | def instantiate(self, sim=None, icell=None): # pylint: disable=W0613
method acc_label (line 245) | def acc_label(self):
method __str__ (line 249) | def __str__(self):
class NrnSeclistSecLocation (line 255) | class NrnSeclistSecLocation(Location, DictMixin):
method __init__ (line 261) | def __init__(
method instantiate (line 279) | def instantiate(self, sim=None, icell=None): # pylint: disable=W0613
method acc_label (line 286) | def acc_label(self):
method __str__ (line 295) | def __str__(self):
class NrnSomaDistanceCompLocation (line 301) | class NrnSomaDistanceCompLocation(Location, DictMixin):
method __init__ (line 307) | def __init__(
method find_icomp (line 327) | def find_icomp(self, sim, iseclist):
method instantiate (line 355) | def instantiate(self, sim=None, icell=None):
method acc_label (line 366) | def acc_label(self):
method __str__ (line 390) | def __str__(self):
class NrnSecSomaDistanceCompLocation (line 397) | class NrnSecSomaDistanceCompLocation(NrnSomaDistanceCompLocation):
method __init__ (line 405) | def __init__(
method instantiate (line 429) | def instantiate(self, sim=None, icell=None):
method acc_label (line 462) | def acc_label(self):
class NrnTrunkSomaDistanceCompLocation (line 468) | class NrnTrunkSomaDistanceCompLocation(NrnSecSomaDistanceCompLocation):
method __init__ (line 481) | def __init__(
method set_sec_index (line 512) | def set_sec_index(self, icell=None):
method instantiate (line 534) | def instantiate(self, sim=None, icell=None):
method acc_label (line 540) | def acc_label(self):
class ArbLocation (line 546) | class ArbLocation(Location):
method instantiate (line 549) | def instantiate(self, sim=None, icell=None): # pylint: disable=W0613
method __str__ (line 554) | def __str__(self):
class ArbSegmentLocation (line 559) | class ArbSegmentLocation(ArbLocation):
method __init__ (line 563) | def __init__(self, name, segment, comment=''):
method acc_label (line 567) | def acc_label(self):
class ArbBranchLocation (line 572) | class ArbBranchLocation(ArbLocation):
method __init__ (line 578) | def __init__(self, name, branch, comment=''):
method acc_label (line 582) | def acc_label(self):
class ArbSegmentRelLocation (line 587) | class ArbSegmentRelLocation(ArbLocation):
method __init__ (line 591) | def __init__(self, name, segment, pos, comment=''):
method acc_label (line 596) | def acc_label(self):
class ArbBranchRelLocation (line 603) | class ArbBranchRelLocation(ArbLocation):
method __init__ (line 609) | def __init__(self, name, branch, pos, comment=''):
method acc_label (line 614) | def acc_label(self):
class ArbLocsetLocation (line 621) | class ArbLocsetLocation(ArbLocation):
method __init__ (line 625) | def __init__(self, name, locset, comment=''):
method acc_label (line 629) | def acc_label(self):
class ArbRegionLocation (line 634) | class ArbRegionLocation(ArbLocation):
method __init__ (line 638) | def __init__(self, name, region, comment=''):
method acc_label (line 642) | def acc_label(self):
class EPhysLocInstantiateException (line 647) | class EPhysLocInstantiateException(Exception):
method __init__ (line 651) | def __init__(self, message):
class EPhysLocAccException (line 657) | class EPhysLocAccException(Exception):
method __init__ (line 661) | def __init__(self, message):
FILE: bluepyopt/ephys/mechanisms.py
class Mechanism (line 38) | class Mechanism(base.BaseEPhys):
class NrnMODMechanism (line 44) | class NrnMODMechanism(Mechanism, serializer.DictMixin):
method __init__ (line 57) | def __init__(
method instantiate (line 96) | def instantiate(self, sim=None, icell=None):
method instantiate_determinism (line 116) | def instantiate_determinism(self, deterministic, icell, isec, sim):
method destroy (line 147) | def destroy(self, sim=None):
method __str__ (line 152) | def __str__(self):
method hash_hoc (line 160) | def hash_hoc(string, sim):
method hash_py (line 170) | def hash_py(string):
method generate_reinitrng_hoc_block (line 180) | def generate_reinitrng_hoc_block(self):
method prefix (line 206) | def prefix(self):
method prefix (line 212) | def prefix(self, value):
class NrnMODPointProcessMechanism (line 270) | class NrnMODPointProcessMechanism(Mechanism):
method __init__ (line 274) | def __init__(
method instantiate (line 303) | def instantiate(self, sim=None, icell=None):
method destroy (line 319) | def destroy(self, sim=None):
method __str__ (line 324) | def __str__(self):
FILE: bluepyopt/ephys/models.py
class Model (line 41) | class Model(object):
method __init__ (line 45) | def __init__(self, name):
method instantiate (line 52) | def instantiate(self, sim=None):
method destroy (line 56) | def destroy(self, sim=None):
class CellModel (line 61) | class CellModel(Model):
method __init__ (line 65) | def __init__(
method check_name (line 124) | def check_name(self):
method params_by_names (line 144) | def params_by_names(self, param_names):
method freeze (line 149) | def freeze(self, param_dict):
method unfreeze (line 155) | def unfreeze(self, param_names):
method create_empty_template (line 162) | def create_empty_template(
method create_empty_cell (line 207) | def create_empty_cell(
method instantiate_morphology (line 226) | def instantiate_morphology(self, sim=None):
method instantiate_morphology_3d (line 247) | def instantiate_morphology_3d(self, sim=None):
method instantiate (line 253) | def instantiate(self, sim=None):
method destroy (line 267) | def destroy(self, sim=None): # pylint: disable=W0613
method check_nonfrozen_params (line 290) | def check_nonfrozen_params(self, param_names): # pylint: disable=W0613
method _create_sim_desc (line 300) | def _create_sim_desc(self, param_values,
method create_hoc (line 371) | def create_hoc(self, param_values,
method create_acc (line 382) | def create_acc(self, param_values,
method write_acc (line 418) | def write_acc(self, output_dir, param_values,
method __str__ (line 430) | def __str__(self):
class HocMorphology (line 453) | class HocMorphology(morphologies.Morphology):
method __init__ (line 457) | def __init__(self, morphology_path):
class HocCellModel (line 465) | class HocCellModel(CellModel):
method __init__ (line 469) | def __init__(self, name, morphology_path, hoc_path=None, hoc_string=No...
method params_by_names (line 502) | def params_by_names(self, param_names):
method freeze (line 505) | def freeze(self, param_dict):
method unfreeze (line 508) | def unfreeze(self, param_names):
method instantiate (line 511) | def instantiate(self, sim=None):
method destroy (line 529) | def destroy(self, sim=None):
method check_nonfrozen_params (line 533) | def check_nonfrozen_params(self, param_names):
method __str__ (line 536) | def __str__(self):
method get_template_name (line 546) | def get_template_name(hoc_string):
method load_hoc_template (line 564) | def load_hoc_template(sim, hoc_string):
class LFPyCellModel (line 582) | class LFPyCellModel(Model):
method __init__ (line 586) | def __init__(
method check_name (line 654) | def check_name(self):
method params_by_names (line 677) | def params_by_names(self, param_names):
method freeze (line 682) | def freeze(self, param_dict):
method unfreeze (line 688) | def unfreeze(self, param_names):
method create_empty_template (line 695) | def create_empty_template(
method create_empty_cell (line 744) | def create_empty_cell(name, sim, seclist_names=None, secarray_names=No...
method instantiate (line 758) | def instantiate(self, sim=None):
method destroy (line 800) | def destroy(self, sim=None): # pylint: disable=W0613
method check_nonfrozen_params (line 822) | def check_nonfrozen_params(self, param_names): # pylint: disable=W0613
method create_hoc (line 832) | def create_hoc(
method __str__ (line 871) | def __str__(self):
FILE: bluepyopt/ephys/morphologies.py
class Morphology (line 39) | class Morphology(BaseEPhys):
class NrnFileMorphology (line 45) | class NrnFileMorphology(Morphology, DictMixin):
method __init__ (line 53) | def __init__(
method __str__ (line 112) | def __str__(self):
method instantiate (line 117) | def instantiate(self, sim=None, icell=None):
method destroy (line 173) | def destroy(self, sim=None):
method set_nseg (line 177) | def set_nseg(self, icell):
method replace_axon (line 183) | def replace_axon(sim=None, icell=None,
class ArbFileMorphology (line 275) | class ArbFileMorphology(Morphology, DictMixin):
method load (line 306) | def load(morpho_filename, replace_axon):
method extract_nrn_seclists (line 337) | def extract_nrn_seclists(icell, seclists):
method replace_axon (line 408) | def replace_axon(morphology, replacement=None):
FILE: bluepyopt/ephys/objectives.py
class EFeatureObjective (line 25) | class EFeatureObjective(bluepyopt.objectives.Objective):
method __init__ (line 29) | def __init__(self, name, features=None):
method calculate_feature_scores (line 41) | def calculate_feature_scores(self, responses):
method calculate_feature_values (line 50) | def calculate_feature_values(self, responses):
class SingletonObjective (line 60) | class SingletonObjective(EFeatureObjective):
method __init__ (line 64) | def __init__(self, name, feature):
method calculate_score (line 74) | def calculate_score(self, responses):
method calculate_value (line 79) | def calculate_value(self, responses):
method __str__ (line 84) | def __str__(self):
class SingletonWeightObjective (line 90) | class SingletonWeightObjective(SingletonObjective):
method __init__ (line 94) | def __init__(self, name, feature, weight):
method calculate_score (line 105) | def calculate_score(self, responses):
method __str__ (line 110) | def __str__(self):
class MaxObjective (line 116) | class MaxObjective(EFeatureObjective):
method calculate_score (line 120) | def calculate_score(self, responses):
class WeightedSumObjective (line 126) | class WeightedSumObjective(EFeatureObjective):
method __init__ (line 130) | def __init__(self, name, features, weights):
method calculate_score (line 146) | def calculate_score(self, responses):
FILE: bluepyopt/ephys/objectivescalculators.py
class ObjectivesCalculator (line 23) | class ObjectivesCalculator(object):
method __init__ (line 27) | def __init__(
method calculate_scores (line 38) | def calculate_scores(self, responses):
method calculate_values (line 44) | def calculate_values(self, responses):
method __str__ (line 50) | def __str__(self):
FILE: bluepyopt/ephys/parameters.py
class NrnParameter (line 35) | class NrnParameter(bluepyopt.parameters.Parameter):
method __init__ (line 39) | def __init__(
method instantiate (line 55) | def instantiate(self, sim=None, icell=None, params=None):
method destroy (line 59) | def destroy(self, sim=None):
class MetaParameter (line 64) | class MetaParameter(NrnParameter):
method __init__ (line 68) | def __init__(
method value (line 91) | def value(self, _value):
method __str__ (line 97) | def __str__(self):
class NrnMetaListEqualParameter (line 105) | class NrnMetaListEqualParameter(bluepyopt.parameters.MetaListEqualParame...
method instantiate (line 108) | def instantiate(self, sim=None, icell=None, params=None):
method destroy (line 116) | def destroy(self, sim=None):
class NrnGlobalParameter (line 122) | class NrnGlobalParameter(NrnParameter, DictMixin):
method __init__ (line 127) | def __init__(
method instantiate (line 156) | def instantiate(self, sim=None, icell=None, params=None):
method __str__ (line 163) | def __str__(self):
class NrnSectionParameter (line 170) | class NrnSectionParameter(NrnParameter, DictMixin):
method __init__ (line 176) | def __init__(
method instantiate (line 219) | def instantiate(self, sim=None, icell=None, params=None):
method __str__ (line 242) | def __str__(self):
class NrnPointProcessParameter (line 251) | class NrnPointProcessParameter(NrnParameter, DictMixin):
method __init__ (line 258) | def __init__(
method instantiate (line 293) | def instantiate(self, sim=None, icell=None, params=None):
method __str__ (line 309) | def __str__(self):
class NrnRangeParameter (line 319) | class NrnRangeParameter(NrnParameter, DictMixin):
method __init__ (line 325) | def __init__(
method instantiate (line 367) | def instantiate(self, sim=None, icell=None, params=None):
method __str__ (line 390) | def __str__(self):
FILE: bluepyopt/ephys/parameterscalers/acc_iexpr.py
class ArbIExprValueEliminator (line 27) | class ArbIExprValueEliminator(ast.NodeTransformer):
method __init__ (line 30) | def __init__(self, variable_name, value):
method generic_visit (line 37) | def generic_visit(self, node):
method _is_linear (line 66) | def _is_linear(self, node):
method visit_Name (line 78) | def visit_Name(self, node):
class ArbIExprEmitter (line 91) | class ArbIExprEmitter(ast.NodeVisitor):
method __init__ (line 107) | def __init__(self, var_name_to_sexpr, constant_formatter):
method emit (line 113) | def emit(self):
method _emit (line 116) | def _emit(self, expr):
method generic_visit (line 119) | def generic_visit(self, node):
method visit_Constant (line 131) | def visit_Constant(self, node):
method visit_Num (line 137) | def visit_Num(self, node):
method visit_Attribute (line 143) | def visit_Attribute(self, node):
method visit_UnaryOp (line 152) | def visit_UnaryOp(self, node):
method visit_BinOp (line 166) | def visit_BinOp(self, node):
method visit_Call (line 180) | def visit_Call(self, node):
method visit_Name (line 211) | def visit_Name(self, node):
function generate_acc_scale_iexpr (line 221) | def generate_acc_scale_iexpr(iexpr, variables, constant_formatter):
FILE: bluepyopt/ephys/parameterscalers/parameterscalers.py
function format_float (line 34) | def format_float(value):
class MissingFormatDict (line 39) | class MissingFormatDict(dict):
method __missing__ (line 43) | def __missing__(self, key): # pylint: disable=R0201
class ParameterScaler (line 48) | class ParameterScaler(BaseEPhys):
class NrnSegmentLinearScaler (line 56) | class NrnSegmentLinearScaler(ParameterScaler, DictMixin):
method __init__ (line 61) | def __init__(
method scale (line 79) | def scale(self, values, segment=None, sim=None): # pylint: disable=W0613
method __str__ (line 87) | def __str__(self):
class NrnSegmentSectionDistanceScaler (line 93) | class NrnSegmentSectionDistanceScaler(ParameterScaler, DictMixin):
method __init__ (line 100) | def __init__(
method inst_distribution (line 148) | def inst_distribution(self):
method scale_dict (line 164) | def scale_dict(self, values, distance):
method eval_dist (line 176) | def eval_dist(self, values, distance):
method scale (line 182) | def scale(self, values, segment, sim=None):
method acc_scale_iexpr (line 212) | def acc_scale_iexpr(self, value, constant_formatter=format_float):
method __str__ (line 218) | def __str__(self):
class NrnSegmentSomaDistanceScaler (line 224) | class NrnSegmentSomaDistanceScaler(NrnSegmentSectionDistanceScaler,
method __init__ (line 230) | def __init__(
method acc_scale_iexpr (line 258) | def acc_scale_iexpr(self, value, constant_formatter=format_float):
class NrnSegmentSomaDistanceStepScaler (line 271) | class NrnSegmentSomaDistanceStepScaler(NrnSegmentSomaDistanceScaler,
method __init__ (line 277) | def __init__(
method scale_dict (line 311) | def scale_dict(self, values, distance):
FILE: bluepyopt/ephys/protocols.py
class Protocol (line 45) | class Protocol(object):
method __init__ (line 49) | def __init__(self, name=None):
class SequenceProtocol (line 59) | class SequenceProtocol(Protocol):
method __init__ (line 63) | def __init__(self, name=None, protocols=None):
method run (line 74) | def run(
method subprotocols (line 117) | def subprotocols(self):
method __str__ (line 127) | def __str__(self):
class SweepProtocol (line 139) | class SweepProtocol(Protocol):
method __init__ (line 143) | def __init__(
method total_duration (line 169) | def total_duration(self):
method subprotocols (line 174) | def subprotocols(self):
method adjust_stochasticity (line 179) | def adjust_stochasticity(func):
method _run_func (line 199) | def _run_func(self, cell_model, param_values, sim=None):
method run (line 243) | def run(
method instantiate (line 307) | def instantiate(self, sim=None, cell_model=None):
method destroy (line 330) | def destroy(self, sim=None):
method __str__ (line 339) | def __str__(self):
class StepProtocol (line 355) | class StepProtocol(SweepProtocol):
method __init__ (line 359) | def __init__(
method step_delay (line 392) | def step_delay(self):
method step_duration (line 397) | def step_duration(self):
class ArbSweepProtocol (line 402) | class ArbSweepProtocol(Protocol):
method __init__ (line 406) | def __init__(
method total_duration (line 428) | def total_duration(self):
method subprotocols (line 433) | def subprotocols(self):
method _run_func (line 438) | def _run_func(self, cell_json, param_values, sim=None):
method run (line 494) | def run(
method instantiate_locations (line 551) | def instantiate_locations(self, label_dict):
method instantiate_iclamp_stimuli (line 599) | def instantiate_iclamp_stimuli(self, decor, use_labels=False):
method instantiate_synaptic_stimuli (line 625) | def instantiate_synaptic_stimuli(self, cell_model, use_labels=False):
method instantiate_recordings (line 635) | def instantiate_recordings(self, cell_model, use_labels=False):
method __str__ (line 662) | def __str__(self):
class SweepProtocolException (line 678) | class SweepProtocolException(Exception):
method __init__ (line 682) | def __init__(self, message):
class ArbSweepProtocolException (line 688) | class ArbSweepProtocolException(Exception):
method __init__ (line 692) | def __init__(self, message):
FILE: bluepyopt/ephys/recordings.py
class Recording (line 30) | class Recording(object):
method __init__ (line 34) | def __init__(self, name=None):
class CompRecording (line 44) | class CompRecording(Recording):
method __init__ (line 48) | def __init__(
method response (line 75) | def response(self):
method instantiate (line 85) | def instantiate(self, sim=None, icell=None):
method destroy (line 100) | def destroy(self, sim=None):
method __str__ (line 107) | def __str__(self):
class LFPRecording (line 113) | class LFPRecording(Recording):
method __init__ (line 120) | def __init__(self, name=None):
method response (line 137) | def response(self):
method instantiate (line 147) | def instantiate(self, sim=None, lfpy_cell=None, electrode=None):
method destroy (line 166) | def destroy(self, sim=None):
method __str__ (line 176) | def __str__(self):
FILE: bluepyopt/ephys/responses.py
class Response (line 26) | class Response(object):
method __init__ (line 30) | def __init__(self, name):
method __str__ (line 40) | def __str__(self):
class TimeVoltageResponse (line 44) | class TimeVoltageResponse(Response):
method __init__ (line 48) | def __init__(self, name, time=None, voltage=None):
method read_csv (line 63) | def read_csv(self, filename):
method to_csv (line 68) | def to_csv(self, filename):
method __getitem__ (line 73) | def __getitem__(self, index):
method plot (line 79) | def plot(self, axes):
class TimeLFPResponse (line 89) | class TimeLFPResponse(TimeVoltageResponse):
method __init__ (line 93) | def __init__(self, name, time=None, lfp=None):
method plot (line 106) | def plot(self, axes):
FILE: bluepyopt/ephys/serializer.py
class DictMixin (line 11) | class DictMixin(object):
method _serializer (line 17) | def _serializer(value):
method _deserializer (line 31) | def _deserializer(value):
method to_dict (line 44) | def to_dict(self):
method from_dict (line 53) | def from_dict(cls, fields):
function instantiator (line 64) | def instantiator(fields):
FILE: bluepyopt/ephys/simulators.py
class NrnSimulator (line 18) | class NrnSimulator(object):
method __init__ (line 21) | def __init__(
method cvode (line 63) | def cvode(self):
method cvode_minstep (line 69) | def cvode_minstep(self):
method cvode_minstep (line 75) | def cvode_minstep(self, value):
method _nrn_disable_banner (line 81) | def _nrn_disable_banner():
method neuron (line 102) | def neuron(self):
method initialize (line 118) | def initialize(self):
method run (line 124) | def run(
class NrnSimulatorException (line 187) | class NrnSimulatorException(Exception):
method __init__ (line 190) | def __init__(self, message, original):
class LFPySimulator (line 197) | class LFPySimulator(NrnSimulator):
method __init__ (line 200) | def __init__(
method run (line 232) | def run(
class LFPySimulatorException (line 293) | class LFPySimulatorException(Exception):
method __init__ (line 296) | def __init__(self, message, original):
class ArbSimulator (line 303) | class ArbSimulator(object):
method __init__ (line 306) | def __init__(self, dt=None, ext_catalogues=None):
method initialize (line 330) | def initialize(self):
method instantiate (line 334) | def instantiate(self, morph, decor, labels):
method run (line 371) | def run(self, arb_cell_model, tstop=None, dt=None):
class ArbSimulatorException (line 382) | class ArbSimulatorException(Exception):
method __init__ (line 385) | def __init__(self, message):
FILE: bluepyopt/ephys/stimuli.py
class Stimulus (line 32) | class Stimulus(object):
class SynapticStimulus (line 38) | class SynapticStimulus(Stimulus):
class LFPStimulus (line 44) | class LFPStimulus(Stimulus):
method instantiate (line 48) | def instantiate(self, sim=None, lfpy_cell=None):
class NrnCurrentPlayStimulus (line 53) | class NrnCurrentPlayStimulus(Stimulus):
method __init__ (line 57) | def __init__(self,
method envelope (line 78) | def envelope(self):
method instantiate (line 85) | def instantiate(self, sim=None, icell=None):
method destroy (line 105) | def destroy(self, sim=None):
method __str__ (line 112) | def __str__(self):
class NrnNetStimStimulus (line 118) | class NrnNetStimStimulus(SynapticStimulus):
method __init__ (line 122) | def __init__(self,
method instantiate (line 156) | def instantiate(self, sim=None, icell=None):
method destroy (line 172) | def destroy(self, sim=None):
method acc_events (line 177) | def acc_events(self):
method __str__ (line 203) | def __str__(self):
class NrnSquarePulse (line 214) | class NrnSquarePulse(Stimulus):
method __init__ (line 218) | def __init__(self,
method envelope (line 242) | def envelope(self):
method instantiate (line 255) | def instantiate(self, sim=None, icell=None):
method destroy (line 274) | def destroy(self, sim=None):
method __str__ (line 279) | def __str__(self):
class NrnRampPulse (line 290) | class NrnRampPulse(Stimulus):
method __init__ (line 294) | def __init__(self,
method envelope (line 322) | def envelope(self):
method instantiate (line 347) | def instantiate(self, sim=None, icell=None):
method destroy (line 381) | def destroy(self, sim=None):
method __str__ (line 388) | def __str__(self):
class LFPySquarePulse (line 401) | class LFPySquarePulse(LFPStimulus):
method __init__ (line 405) | def __init__(self,
method instantiate (line 428) | def instantiate(self, sim=None, lfpy_cell=None):
method destroy (line 469) | def destroy(self, sim=None):
method __str__ (line 474) | def __str__(self):
FILE: bluepyopt/evaluators.py
class Evaluator (line 25) | class Evaluator(object):
method __init__ (line 43) | def __init__(self, objectives=None, params=None):
method evaluate_with_dicts (line 49) | def evaluate_with_dicts(self, param_dict):
method evaluate_with_lists (line 63) | def evaluate_with_lists(self, params):
FILE: bluepyopt/ipyp/bpopt_tasksdb.py
function get_engine_data (line 35) | def get_engine_data(tasksdb_filename):
function plot_usage (line 67) | def plot_usage(tasks, engine_number_map):
function plot_duration_histogram (line 92) | def plot_duration_histogram(tasks):
function calculate_unused_compute (line 108) | def calculate_unused_compute(tasks):
function run (line 126) | def run(arg_list):
function main (line 143) | def main():
FILE: bluepyopt/neuroml/biophys.py
function get_nml_mech_dir (line 69) | def get_nml_mech_dir():
function adapt_CaDynamics_nml (line 76) | def adapt_CaDynamics_nml(
function get_channel_from_param_name (line 122) | def get_channel_from_param_name(param_name):
function format_dist_fun (line 143) | def format_dist_fun(raw_expr, value, dist_param_names):
function add_nml_channel_to_nml_cell_file (line 166) | def add_nml_channel_to_nml_cell_file(
function get_channel_ion (line 210) | def get_channel_ion(channel, custom_channel_ion=None):
function get_erev (line 228) | def get_erev(ion, custom_ion_erevs=None):
function get_arguments (line 246) | def get_arguments(
function extract_parameter_value (line 309) | def extract_parameter_value(
function get_density (line 365) | def get_density(
function get_specific_capacitance (line 431) | def get_specific_capacitance(capacitance_overwrites):
function get_biophys (line 456) | def get_biophys(
FILE: bluepyopt/neuroml/cell.py
function create_neuroml_cell (line 30) | def create_neuroml_cell(
FILE: bluepyopt/neuroml/morphology.py
function create_loadcell_hoc (line 28) | def create_loadcell_hoc(
function create_morph_nml (line 66) | def create_morph_nml(bpo_cell, network_filename, release_params):
function add_segment_groups (line 116) | def add_segment_groups(cell):
FILE: bluepyopt/neuroml/simulation.py
function create_neuroml_simulation (line 29) | def create_neuroml_simulation(
FILE: bluepyopt/objectives.py
class Objective (line 23) | class Objective(object):
method __init__ (line 27) | def __init__(self, name, value=None):
FILE: bluepyopt/optimisations.py
class Optimisation (line 23) | class Optimisation(object):
method __init__ (line 29) | def __init__(self, evaluator=None):
FILE: bluepyopt/parameters.py
class Parameter (line 23) | class Parameter(object):
method __init__ (line 27) | def __init__(self, name, value=None, frozen=False, bounds=None,
method lower_bound (line 42) | def lower_bound(self):
method upper_bound (line 50) | def upper_bound(self):
method value (line 58) | def value(self):
method freeze (line 62) | def freeze(self, value):
method unfreeze (line 67) | def unfreeze(self):
method value (line 73) | def value(self, value):
method check_bounds (line 83) | def check_bounds(self):
method __str__ (line 92) | def __str__(self):
class MetaListEqualParameter (line 98) | class MetaListEqualParameter(Parameter):
method __init__ (line 102) | def __init__(self, name, value=None, frozen=False,
method value (line 126) | def value(self, value):
method freeze (line 134) | def freeze(self, value):
method unfreeze (line 142) | def unfreeze(self):
method check_bounds (line 149) | def check_bounds(self):
method __str__ (line 157) | def __str__(self):
FILE: bluepyopt/stoppingCriteria.py
class StoppingCriteria (line 19) | class StoppingCriteria(object):
method __init__ (line 22) | def __init__(self):
method check (line 27) | def check(self, kwargs):
method reset (line 31) | def reset(self):
FILE: bluepyopt/tests/disable_simplecell_scoop.py
function disabled_scoop (line 27) | def disabled_scoop():
FILE: bluepyopt/tests/test_bluepyopt.py
function test_import (line 29) | def test_import():
FILE: bluepyopt/tests/test_deapext/deapext_test_utils.py
function make_mock_population (line 8) | def make_mock_population(features_count=5, population_count=5):
function make_population (line 31) | def make_population(features_count=5, population_count=5):
FILE: bluepyopt/tests/test_deapext/test_algorithms.py
function test_eaAlphaMuPlusLambdaCheckpoint (line 16) | def test_eaAlphaMuPlusLambdaCheckpoint():
function test_eaAlphaMuPlusLambdaCheckpoint_with_checkpoint (line 57) | def test_eaAlphaMuPlusLambdaCheckpoint_with_checkpoint():
FILE: bluepyopt/tests/test_deapext/test_hype.py
function test_hypeIndicatorExact (line 11) | def test_hypeIndicatorExact():
function test_hypeIndicatorSampled (line 25) | def test_hypeIndicatorSampled():
FILE: bluepyopt/tests/test_deapext/test_optimisations.py
function test_DEAPOptimisation_constructor (line 14) | def test_DEAPOptimisation_constructor():
function test_IBEADEAPOptimisation_constructor (line 34) | def test_IBEADEAPOptimisation_constructor():
function test_DEAPOptimisation_run (line 46) | def test_DEAPOptimisation_run():
function test_DEAPOptimisation_run_from_parents (line 64) | def test_DEAPOptimisation_run_from_parents():
function test_selectorname (line 80) | def test_selectorname():
FILE: bluepyopt/tests/test_deapext/test_optimisationsCMA.py
function test_optimisationsCMA_normspace (line 9) | def test_optimisationsCMA_normspace():
function test_optimisationsCMA_SO_run (line 26) | def test_optimisationsCMA_SO_run():
function test_optimisationsCMA_MO_run (line 44) | def test_optimisationsCMA_MO_run():
FILE: bluepyopt/tests/test_deapext/test_selIBEA.py
function test_calc_fitness_components (line 16) | def test_calc_fitness_components():
function test_mating_selection (line 41) | def test_mating_selection():
function test_selibea_init (line 53) | def test_selibea_init():
FILE: bluepyopt/tests/test_deapext/test_stoppingCriteria.py
function test_MaxNGen (line 10) | def test_MaxNGen():
FILE: bluepyopt/tests/test_deapext/test_utils.py
function flag (line 10) | def flag(event):
function catch_event (line 16) | def catch_event(event):
function test_run_next_gen_condition (line 30) | def test_run_next_gen_condition():
FILE: bluepyopt/tests/test_ephys/test_acc.py
function test_arbor_labels (line 10) | def test_arbor_labels():
FILE: bluepyopt/tests/test_ephys/test_create_acc.py
function test_read_templates (line 40) | def test_read_templates():
function test_Nrn2ArbParamAdapter_param_name (line 52) | def test_Nrn2ArbParamAdapter_param_name():
function test_Nrn2ArbParamAdapter_param_value (line 67) | def test_Nrn2ArbParamAdapter_param_value():
function test_Nrn2ArbParamAdapter_format (line 85) | def test_Nrn2ArbParamAdapter_format():
function test_Nrn2ArbMechGrouper_format_params_and_group_by_mech (line 164) | def test_Nrn2ArbMechGrouper_format_params_and_group_by_mech():
function test_Nrn2ArbMechGrouper_process_global (line 185) | def test_Nrn2ArbMechGrouper_process_global():
function test_Nrn2ArbMechGrouper_is_global_property (line 200) | def test_Nrn2ArbMechGrouper_is_global_property():
function test_separate_global_properties (line 211) | def test_separate_global_properties():
function test_Nrn2ArbMechGrouper_process_local (line 226) | def test_Nrn2ArbMechGrouper_process_local():
function test_ArbNmodlMechFormatter_load_mech_catalogue_meta (line 256) | def test_ArbNmodlMechFormatter_load_mech_catalogue_meta():
function test_ArbNmodlMechFormatter_mech_name (line 266) | def test_ArbNmodlMechFormatter_mech_name():
function test_ArbNmodlMechFormatter_translate_density (line 273) | def test_ArbNmodlMechFormatter_translate_density():
function test_arb_populate_label_dict (line 309) | def test_arb_populate_label_dict():
function test_create_acc (line 330) | def test_create_acc():
function test_create_acc_filename (line 395) | def test_create_acc_filename():
function test_create_acc_replace_axon (line 445) | def test_create_acc_replace_axon():
function make_cell (line 495) | def make_cell(replace_axon):
function run_short_sim (line 530) | def run_short_sim(cable_cell):
function test_cell_model_write_and_read_acc (line 544) | def test_cell_model_write_and_read_acc():
function test_cell_model_write_and_read_acc_replace_axon (line 575) | def test_cell_model_write_and_read_acc_replace_axon():
function test_cell_model_create_acc_replace_axon_without_instantiate (line 627) | def test_cell_model_create_acc_replace_axon_without_instantiate():
function check_acc_dir (line 642) | def check_acc_dir(test_dir, ref_dir):
function test_write_acc_simple (line 675) | def test_write_acc_simple():
function test_write_acc_l5pc (line 719) | def test_write_acc_l5pc():
function test_write_acc_expsyn (line 779) | def test_write_acc_expsyn():
FILE: bluepyopt/tests/test_ephys/test_create_hoc.py
function test_generate_channels_by_location (line 28) | def test_generate_channels_by_location():
function test__generate_channels_by_location (line 41) | def test__generate_channels_by_location():
function test_generate_parameters (line 58) | def test_generate_parameters():
function test__generate_parameters (line 69) | def test__generate_parameters():
function test_create_hoc (line 92) | def test_create_hoc():
function test_create_hoc_filename (line 105) | def test_create_hoc_filename():
function test_generate_reinitrng (line 128) | def test_generate_reinitrng():
function test_range_exprs_to_hoc (line 137) | def test_range_exprs_to_hoc():
function test_range_exprs_to_hoc_step_scaler (line 159) | def test_range_exprs_to_hoc_step_scaler():
FILE: bluepyopt/tests/test_ephys/test_evaluators.py
function test_CellEvaluator_init (line 22) | def test_CellEvaluator_init():
function test_CellEvaluator_evaluate (line 45) | def test_CellEvaluator_evaluate():
FILE: bluepyopt/tests/test_ephys/test_extra_features_utils.py
function test_peak_to_valley (line 21) | def test_peak_to_valley():
function test_peak_trough_ratio (line 29) | def test_peak_trough_ratio():
function test_halfwidth (line 38) | def test_halfwidth():
function test_repolarization_slope (line 49) | def test_repolarization_slope():
function test_recovery_slope (line 64) | def test_recovery_slope():
function test_peak_image (line 75) | def test_peak_image():
function test_relative_amplitude (line 91) | def test_relative_amplitude():
function test_peak_time_diff (line 107) | def test_peak_time_diff():
function test__get_trough_and_peak_idx (line 123) | def test__get_trough_and_peak_idx():
function test_calculate_features (line 133) | def test_calculate_features():
FILE: bluepyopt/tests/test_ephys/test_features.py
function test_EFeature (line 15) | def test_EFeature():
function test_eFELFeature (line 22) | def test_eFELFeature():
function test_eFELFeature_max_score (line 52) | def test_eFELFeature_max_score():
function test_eFELFeature_force_max_score (line 89) | def test_eFELFeature_force_max_score():
function test_eFELFeature_double_settings (line 126) | def test_eFELFeature_double_settings():
function test_eFELFeature_int_settings (line 164) | def test_eFELFeature_int_settings():
function test_eFELFeature_string_settings (line 200) | def test_eFELFeature_string_settings():
function test_eFELFeature_serialize (line 234) | def test_eFELFeature_serialize():
function test_extraFELFeature (line 252) | def test_extraFELFeature():
function test_masked_cosine_distance (line 332) | def test_masked_cosine_distance():
FILE: bluepyopt/tests/test_ephys/test_init.py
function test_import (line 30) | def test_import():
function test_ephys_base (line 36) | def test_ephys_base():
FILE: bluepyopt/tests/test_ephys/test_locations.py
function test_location_init (line 33) | def test_location_init():
class TestNrnSectionCompLocation (line 42) | class TestNrnSectionCompLocation(object):
method setup_method (line 45) | def setup_method(self):
method test_instantiate (line 56) | def test_instantiate(self):
class TestNrnSeclistCompLocation (line 81) | class TestNrnSeclistCompLocation(object):
method setup_method (line 84) | def setup_method(self):
method test_instantiate (line 95) | def test_instantiate(self):
class TestNrnSeclistSecLocation (line 126) | class TestNrnSeclistSecLocation(object):
method setup_method (line 129) | def setup_method(self):
method test_instantiate (line 140) | def test_instantiate(self):
class TestNrnSomaDistanceCompLocation (line 168) | class TestNrnSomaDistanceCompLocation(object):
method setup_method (line 171) | def setup_method(self):
method test_instantiate (line 179) | def test_instantiate(self):
class TestNrnSecSomaDistanceCompLocation (line 213) | class TestNrnSecSomaDistanceCompLocation(object):
method setup_method (line 216) | def setup_method(self):
method test_instantiate (line 227) | def test_instantiate(self):
class TestNrnTrunkSomaDistanceCompLocation (line 265) | class TestNrnTrunkSomaDistanceCompLocation(object):
method setup_method (line 268) | def setup_method(self):
method test_instantiate (line 280) | def test_instantiate(self):
function test_serialize (line 332) | def test_serialize():
FILE: bluepyopt/tests/test_ephys/test_mechanisms.py
function test_mechanism_serialize (line 24) | def test_mechanism_serialize():
function test_nrnmod_instantiate (line 34) | def test_nrnmod_instantiate():
function compare_strings (line 84) | def compare_strings(s1, s2):
function test_nrnmod_reinitrng_block (line 97) | def test_nrnmod_reinitrng_block():
function test_nrnmod_determinism (line 137) | def test_nrnmod_determinism():
function test_pprocess_instantiate (line 159) | def test_pprocess_instantiate():
function test_string_hash_functions (line 204) | def test_string_hash_functions():
FILE: bluepyopt/tests/test_ephys/test_models.py
function yield_blank_hoc (line 23) | def yield_blank_hoc(template_name):
function test_create_empty_template (line 37) | def test_create_empty_template():
function test_model (line 46) | def test_model():
function test_cellmodel (line 55) | def test_cellmodel():
function test_cellmodel_namecheck (line 70) | def test_cellmodel_namecheck():
function test_load_hoc_template (line 88) | def test_load_hoc_template():
function test_HocCellModel (line 98) | def test_HocCellModel():
function test_CellModel_create_empty_cell (line 120) | def test_CellModel_create_empty_cell():
function test_CellModel_create_hoc (line 129) | def test_CellModel_create_hoc():
function test_CellModel_destroy (line 153) | def test_CellModel_destroy():
function test_lfpy_create_empty_template (line 184) | def test_lfpy_create_empty_template():
function test_lfpycellmodel (line 195) | def test_lfpycellmodel():
function test_lfpycellmodel_namecheck (line 212) | def test_lfpycellmodel_namecheck():
function test_load_lfpy_hoc_template (line 226) | def test_load_lfpy_hoc_template():
function test_LFPyCellModel_create_empty_cell (line 238) | def test_LFPyCellModel_create_empty_cell():
function test_LFPyCellModel_create_hoc (line 247) | def test_LFPyCellModel_create_hoc():
function test_LFPyCellModel_destroy (line 274) | def test_LFPyCellModel_destroy():
function test_metaparameter (line 303) | def test_metaparameter():
FILE: bluepyopt/tests/test_ephys/test_morphologies.py
function test_morphology_init (line 24) | def test_morphology_init():
function test_nrnfilemorphology_init (line 32) | def test_nrnfilemorphology_init():
function test_nrnfilemorphology_replace_axon (line 54) | def test_nrnfilemorphology_replace_axon():
function test_nrnfilemorphology_replace_axon_ax1 (line 78) | def test_nrnfilemorphology_replace_axon_ax1():
function test_nrnfilemorphology_replace_axon_ax2 (line 102) | def test_nrnfilemorphology_replace_axon_ax2():
function test_serialize (line 126) | def test_serialize():
FILE: bluepyopt/tests/test_ephys/test_objectives.py
function test_EFeatureObjective (line 13) | def test_EFeatureObjective():
function test_SingletonObjective (line 51) | def test_SingletonObjective():
function test_SingletonWeightObjective (line 92) | def test_SingletonWeightObjective():
function test_MaxObjective (line 133) | def test_MaxObjective():
function test_WeightedSumObjective (line 180) | def test_WeightedSumObjective():
FILE: bluepyopt/tests/test_ephys/test_parameters.py
function test_pprocessparam_instantiate (line 17) | def test_pprocessparam_instantiate():
function test_serialize (line 50) | def test_serialize():
function test_metaparameter (line 63) | def test_metaparameter():
FILE: bluepyopt/tests/test_ephys/test_parameterscalers.py
function test_NrnSegmentSomaDistanceScaler_dist_params (line 19) | def test_NrnSegmentSomaDistanceScaler_dist_params():
function test_NrnSegmentSectionDistanceScaler_eval_dist_with_dict (line 42) | def test_NrnSegmentSectionDistanceScaler_eval_dist_with_dict():
function test_NrnSegmentSomaDistanceStepScaler_eval_dist_with_dict (line 57) | def test_NrnSegmentSomaDistanceStepScaler_eval_dist_with_dict():
function test_serialize (line 73) | def test_serialize():
function test_parameterscalers_iexpr_generator (line 96) | def test_parameterscalers_iexpr_generator():
function test_parameterscalers_iexpr_generator_non_existent_op (line 115) | def test_parameterscalers_iexpr_generator_non_existent_op():
function test_parameterscalers_iexpr_generator_unsupported_attr (line 133) | def test_parameterscalers_iexpr_generator_unsupported_attr():
function test_parameterscalers_iexpr (line 151) | def test_parameterscalers_iexpr():
FILE: bluepyopt/tests/test_ephys/test_protocols.py
function test_distloc_exception (line 32) | def test_distloc_exception():
function run_RuntimeError (line 83) | def run_RuntimeError(
function run_NrnSimulatorException (line 93) | def run_NrnSimulatorException(
function test_sweepprotocol_init (line 104) | def test_sweepprotocol_init():
function test_sequenceprotocol_init (line 145) | def test_sequenceprotocol_init():
function test_sequenceprotocol_run (line 188) | def test_sequenceprotocol_run():
function test_sequenceprotocol_overwrite (line 233) | def test_sequenceprotocol_overwrite():
function test_stepprotocol_init (line 279) | def test_stepprotocol_init():
function test_sweepprotocol_run_unisolated (line 317) | def test_sweepprotocol_run_unisolated():
function test_sweepprotocol_run_isolated (line 369) | def test_sweepprotocol_run_isolated():
function test_nrnsimulator_exception (line 420) | def test_nrnsimulator_exception():
function test_sweepprotocol_instantiate_with_LFPyCellModel (line 473) | def test_sweepprotocol_instantiate_with_LFPyCellModel():
FILE: bluepyopt/tests/test_ephys/test_recordings.py
function test_comprecording_init (line 33) | def test_comprecording_init():
function test_lfprecording_init (line 82) | def test_lfprecording_init():
function test_lfprecording_instantiate (line 94) | def test_lfprecording_instantiate():
FILE: bluepyopt/tests/test_ephys/test_serializer.py
class ClassforTesting (line 11) | class ClassforTesting(ephys.serializer.DictMixin):
method __init__ (line 16) | def __init__(self, string, boolean, float_, list_, dict_):
class NestedClassforTesting (line 24) | class NestedClassforTesting(ephys.serializer.DictMixin):
method __init__ (line 30) | def __init__(self, test, tuples, lists, dicts):
function test_serializer (line 38) | def test_serializer():
function test_roundtrip_serializer (line 47) | def test_roundtrip_serializer():
function test_nested_serializer (line 57) | def test_nested_serializer():
function test_non_instantiable (line 74) | def test_non_instantiable():
FILE: bluepyopt/tests/test_ephys/test_simulators.py
function test_nrnsimulator_init (line 37) | def test_nrnsimulator_init():
function test_nrnsimulator_init_windows (line 45) | def test_nrnsimulator_init_windows():
function test_nrnsimulator_cvode_minstep (line 61) | def test_nrnsimulator_cvode_minstep():
function test_neuron_import (line 110) | def test_neuron_import():
function test_nrnsim_run_dt_exception (line 119) | def test_nrnsim_run_dt_exception():
function test_nrnsim_run_cvodeactive_dt_exception (line 130) | def test_nrnsim_run_cvodeactive_dt_exception():
function test_disable_banner_exception (line 142) | def test_disable_banner_exception(mock_glob):
function test_lfpysimulator_init (line 154) | def test_lfpysimulator_init():
function test_lfpyimulator_init_windows (line 163) | def test_lfpyimulator_init_windows():
function test__lfpysimulator_neuron_import (line 180) | def test__lfpysimulator_neuron_import():
function test_lfpysim_run_cvodeactive_dt_exception (line 190) | def test_lfpysim_run_cvodeactive_dt_exception():
function test_lfpysimulator_disable_banner_exception (line 228) | def test_lfpysimulator_disable_banner_exception(mock_glob):
FILE: bluepyopt/tests/test_ephys/test_stimuli.py
function test_stimulus_init (line 35) | def test_stimulus_init():
function test_NrnNetStimStimulus_init (line 43) | def test_NrnNetStimStimulus_init():
function test_NrnNetStimStimulus_instantiate (line 55) | def test_NrnNetStimStimulus_instantiate():
function test_NrnCurrentPlayStimulus_instantiate (line 97) | def test_NrnCurrentPlayStimulus_instantiate():
function test_NrnRampPulse_init (line 129) | def test_NrnRampPulse_init():
function test_NrnRampPulse_instantiate (line 136) | def test_NrnRampPulse_instantiate():
function test_LFPySquarePulse_init (line 220) | def test_LFPySquarePulse_init():
function test_LFPySquarePulse_instantiate (line 248) | def test_LFPySquarePulse_instantiate():
FILE: bluepyopt/tests/test_ephys/testmodels/dummycells.py
class DummyCellModel1 (line 6) | class DummyCellModel1(ephys.models.Model):
method __init__ (line 10) | def __init__(self, name=None):
method freeze (line 17) | def freeze(self, param_values):
method unfreeze (line 21) | def unfreeze(self, param_names):
method instantiate (line 25) | def instantiate(self, sim=None):
method destroy (line 57) | def destroy(self, sim=None):
class DummyLFPyCellModel1 (line 63) | class DummyLFPyCellModel1(ephys.models.Model):
method __init__ (line 67) | def __init__(self, name=None):
method freeze (line 77) | def freeze(self, param_values):
method unfreeze (line 81) | def unfreeze(self, param_names):
method instantiate (line 85) | def instantiate(self, sim=None):
method destroy (line 127) | def destroy(self, sim=None):
FILE: bluepyopt/tests/test_ephys/utils.py
function make_mech (line 9) | def make_mech():
function make_parameters (line 23) | def make_parameters():
FILE: bluepyopt/tests/test_evaluators.py
function test_evaluator_init (line 31) | def test_evaluator_init():
FILE: bluepyopt/tests/test_l5pc.py
function load_from_json (line 59) | def load_from_json(filename):
function dump_to_json (line 66) | def dump_to_json(content, filename):
function test_import (line 73) | def test_import():
class TestL5PCModel (line 81) | class TestL5PCModel(object):
method setup_method (line 84) | def setup_method(self):
method test_instantiate (line 94) | def test_instantiate(self):
method teardown_method (line 99) | def teardown_method(self):
class TestL5PCEvaluator (line 104) | class TestL5PCEvaluator(object):
method setup_method (line 107) | def setup_method(self):
method test_eval (line 119) | def test_eval(self):
method teardown_method (line 137) | def teardown_method(self):
function stdout_redirector (line 144) | def stdout_redirector(stream):
function test_exec (line 155) | def test_exec():
function test_l5pc_validate_neuron_arbor (line 184) | def test_l5pc_validate_neuron_arbor():
FILE: bluepyopt/tests/test_lfpy.py
function test_lfpy_evaluator (line 20) | def test_lfpy_evaluator():
FILE: bluepyopt/tests/test_neuroml_fcts.py
function test_get_nml_mech_dir (line 54) | def test_get_nml_mech_dir():
function test_get_channel_from_param_name (line 84) | def test_get_channel_from_param_name():
function test_format_dist_fun (line 102) | def test_format_dist_fun():
function test_add_nml_channel_to_nml_cell_file (line 110) | def test_add_nml_channel_to_nml_cell_file():
function test_get_arguments (line 149) | def test_get_arguments():
function test_extract_parameter_value (line 225) | def test_extract_parameter_value():
function test_get_density (line 280) | def test_get_density():
function test_get_specific_capacitance (line 306) | def test_get_specific_capacitance():
function test_get_biophys (line 359) | def test_get_biophys():
function test_add_segment_groups (line 382) | def test_add_segment_groups():
function test_neuroml_run (line 407) | def test_neuroml_run():
FILE: bluepyopt/tests/test_parameters.py
function test_parameters_init (line 31) | def test_parameters_init():
function test_parameters_fields (line 40) | def test_parameters_fields():
function test_parameters_str (line 56) | def test_parameters_str():
function test_MetaListEqualParameter_init (line 69) | def test_MetaListEqualParameter_init():
function test_MetaListEqualParameter_freeze_unfreeze (line 95) | def test_MetaListEqualParameter_freeze_unfreeze():
function test_MetaListEqualParamete_str (line 123) | def test_MetaListEqualParamete_str():
FILE: bluepyopt/tests/test_simplecell.py
class TestSimpleCellClass (line 13) | class TestSimpleCellClass(object):
method setup_method (line 16) | def setup_method(self):
method test_exec (line 24) | def test_exec(self):
method teardown_method (line 35) | def teardown_method(self):
class TestSimpleCellArborClass (line 42) | class TestSimpleCellArborClass(object):
method setup_method (line 45) | def setup_method(self):
method test_exec (line 53) | def test_exec(self):
method teardown_method (line 64) | def teardown_method(self):
FILE: bluepyopt/tests/test_stochkv.py
function compare_strings (line 22) | def compare_strings(s1, s2):
function test_import (line 34) | def test_import():
function test_run (line 40) | def test_run():
function test_run_stochkv3 (line 72) | def test_run_stochkv3():
FILE: bluepyopt/tests/test_tools.py
function test_load (line 7) | def test_load():
function test_uint32_seed (line 14) | def test_uint32_seed():
FILE: bluepyopt/tools.py
function uint32_seed (line 6) | def uint32_seed(string):
FILE: cloud-config/config/amazon/gather_config.py
function _get_instances_by_tag (line 32) | def _get_instances_by_tag(ec2, tag):
function get_head_public_ip (line 40) | def get_head_public_ip(ec2):
function get_work_private_ips (line 48) | def get_work_private_ips(ec2, tag=WORKER_INSTANCE_NAME, include_head=True):
function get_parser (line 58) | def get_parser():
function main (line 68) | def main():
FILE: examples/expsyn/expsyn.py
function create_model (line 13) | def create_model(sim, do_replace_axon, return_locations=False):
function main (line 83) | def main(args):
FILE: examples/expsyn/generate_acc.py
function main (line 25) | def main():
FILE: examples/graupnerbrunelstdp/gbevaluator.py
function gbParam (line 9) | def gbParam(params):
class GraupnerBrunelEvaluator (line 27) | class GraupnerBrunelEvaluator(bpop.evaluators.Evaluator):
method __init__ (line 31) | def __init__(self):
method get_param_dict (line 60) | def get_param_dict(self, param_values):
method compute_synaptic_gain_with_lists (line 69) | def compute_synaptic_gain_with_lists(self, param_values):
method evaluate_with_lists (line 82) | def evaluate_with_lists(self, param_values):
FILE: examples/graupnerbrunelstdp/run_fit.py
function run_model (line 20) | def run_model():
function plot_log (line 27) | def plot_log(log):
function plot_epspamp_discrete (line 72) | def plot_epspamp_discrete(dt, model_sg, sg, stderr):
function plot_calcium_transients (line 90) | def plot_calcium_transients(protocols, best_ind_dict):
function plot_dt_scan (line 125) | def plot_dt_scan(best_ind_dict, good_solutions, dt, sg, stderr):
function analyse (line 174) | def analyse():
function main (line 215) | def main():
FILE: examples/graupnerbrunelstdp/stdputil.py
function logging_debug_vec (line 33) | def logging_debug_vec(fmt, vec):
function logging_debug (line 39) | def logging_debug(*args):
class Protocol (line 81) | class Protocol(object):
method __init__ (line 85) | def __init__(self, stim_vec, delta_vec, f, n, prot_id=None):
method sort (line 116) | def sort(self):
class CalciumTrace (line 139) | class CalciumTrace(object):
method __init__ (line 143) | def __init__(self, protocol, model):
method materializetrace (line 192) | def materializetrace(self):
method event (line 212) | def event(self):
method time (line 217) | def time(self):
method amplitude (line 222) | def amplitude(self):
function load_neviansakmann (line 227) | def load_neviansakmann():
function time_above_threshold (line 252) | def time_above_threshold(protocol, param):
function transition_prob (line 338) | def transition_prob(protocol, param=None):
function protocol_outcome (line 398) | def protocol_outcome(protocol, param=None):
FILE: examples/graupnerbrunelstdp/test_stdputil.py
function test_protocol_outcome (line 11) | def test_protocol_outcome():
FILE: examples/l5pc/benchmark/task_stats.py
function get_engine_data (line 10) | def get_engine_data():
function plot_usage (line 50) | def plot_usage(tasks, engine_number_map):
function plot_duration_histogram (line 70) | def plot_duration_histogram(tasks):
function filter_start_time (line 84) | def filter_start_time(start_time, tasks):
function calculate_unused_compute (line 93) | def calculate_unused_compute(tasks):
function main (line 105) | def main():
FILE: examples/l5pc/convert_params.py
function main (line 6) | def main():
FILE: examples/l5pc/create_tables.py
function load_json (line 6) | def load_json(filename):
function create_feature_fields (line 12) | def create_feature_fields():
function create_param_fields_list (line 44) | def create_param_fields_list():
function create_param_fields_string (line 103) | def create_param_fields_string():
function create_table (line 143) | def create_table(field_content, n_of_cols):
function main (line 156) | def main():
FILE: examples/l5pc/generate_acc.py
function main (line 26) | def main():
FILE: examples/l5pc/generate_hoc.py
function main (line 40) | def main():
FILE: examples/l5pc/hocmodel.py
class HocModel (line 12) | class HocModel(object):
method __init__ (line 16) | def __init__(
method instantiate (line 36) | def instantiate(self):
method run_protocol (line 48) | def run_protocol(self, protocol):
method destroy (line 69) | def destroy(self):
method run_protocols (line 75) | def run_protocols(self, protocols, param_values=None):
method __str__ (line 102) | def __str__(self):
FILE: examples/l5pc/l5pc_analysis.py
function set_rcoptions (line 55) | def set_rcoptions(func):
function get_responses (line 67) | def get_responses(cell_evaluator, individuals, filename):
function analyse_cp (line 89) | def analyse_cp(opt, cp_filename, responses_filename, figs, sim='nrn'):
function plot_log (line 126) | def plot_log(log, fig=None, box=None):
function plot_history (line 176) | def plot_history(history):
function plot_objectives (line 192) | def plot_objectives(objectives, fig=None, box=None):
function plot_responses (line 222) | def plot_responses(responses, fig=None, box=None):
function get_slice (line 236) | def get_slice(start, end, data):
function plot_multiple_responses (line 241) | def plot_multiple_responses(responses, fig):
function plot_recording (line 269) | def plot_recording(recording, fig=None, box=None, xlabel=False):
function plot_validation (line 308) | def plot_validation(opt, parameters):
function analyse_releasecircuit_model (line 453) | def analyse_releasecircuit_model(opt, figs, box=None, sim='nrn'):
function analyse_releasecircuit_hocmodel (line 481) | def analyse_releasecircuit_hocmodel(opt, fig=None, box=None):
function plot_individual_params (line 519) | def plot_individual_params(
function plot_diversity (line 563) | def plot_diversity(opt, checkpoint_file, fig, param_names):
FILE: examples/l5pc/l5pc_evaluator.py
function define_protocols (line 38) | def define_protocols(do_replace_axon=True, sim='nrn'):
function load_protocols (line 44) | def load_protocols():
function create_protocols (line 53) | def create_protocols(protocol_definitions, do_replace_axon=None, sim='nr...
function define_fitness_calculator (line 133) | def define_fitness_calculator(protocols):
function create (line 184) | def create(do_replace_axon=True, sim='nrn'):
FILE: examples/l5pc/l5pc_model.py
function define_mechanisms (line 36) | def define_mechanisms():
function load_mechanisms (line 43) | def load_mechanisms():
function create_mechanisms (line 52) | def create_mechanisms(mech_definitions):
function define_parameters (line 70) | def define_parameters():
function load_parameters (line 77) | def load_parameters():
function create_parameters (line 81) | def create_parameters(param_configs):
function define_morphology (line 147) | def define_morphology(do_replace_axon):
function create (line 157) | def create(do_replace_axon=True):
FILE: examples/l5pc/l5pc_validate_neuron_arbor_pm.py
function powerset (line 82) | def powerset(mechs): # from itertools docs
function get_extra_params (line 89) | def get_extra_params(loc, mechs):
FILE: examples/l5pc/opt_l5pc.py
function create_optimizer (line 46) | def create_optimizer(args):
function get_parser (line 76) | def get_parser():
function main (line 113) | def main(): # pylint: disable=too-many-statements
FILE: examples/l5pc/tasks2dataframe.py
function _open_db (line 17) | def _open_db(dbfile):
function _add_buffers (line 29) | def _add_buffers(df, arg_names=None, result_names=None, delete_buffers=F...
function create_df (line 69) | def create_df(db, arg_names=None, result_names=None):
FILE: examples/l5pc_lfpy/generate_extra_features.py
class NumpyEncoder (line 54) | class NumpyEncoder(json.JSONEncoder):
method default (line 55) | def default(self, obj):
function dict_to_json (line 82) | def dict_to_json(data, path):
function add_extra_objectives (line 88) | def add_extra_objectives(evaluator):
function save_extra_efeatures (line 129) | def save_extra_efeatures(efeature_values):
FILE: examples/l5pc_lfpy/l5pc_lfpy_evaluator.py
function define_protocols (line 43) | def define_protocols():
function get_feature_name (line 81) | def get_feature_name(protocol_name, location, feature):
function get_recording_names (line 85) | def get_recording_names(protocol_name, location=None):
function define_fitness_calculator (line 89) | def define_fitness_calculator(protocols, feature_file):
function create (line 139) | def create(feature_file="extra_features.json", cvode_active=True, dt=None):
FILE: examples/l5pc_lfpy/l5pc_lfpy_model.py
function define_electrode (line 35) | def define_electrode(
function create (line 73) | def create():
FILE: examples/simplecell/generate_acc.py
function main (line 26) | def main():
FILE: examples/simplecell/generate_hoc.py
function main (line 23) | def main():
FILE: examples/simplecell/simplecell_model.py
function define_morphology (line 26) | def define_morphology(do_replace_axon):
function define_mechanisms (line 31) | def define_mechanisms():
function define_parameters (line 41) | def define_parameters():
function create (line 66) | def create(do_replace_axon):
FILE: examples/stochkv/stochkv3cell.py
function stochkv3_hoc_filename (line 13) | def stochkv3_hoc_filename(deterministic=False):
function run_stochkv3_model (line 21) | def run_stochkv3_model(deterministic=False):
function main (line 128) | def main():
FILE: examples/stochkv/stochkvcell.py
function stochkv_hoc_filename (line 13) | def stochkv_hoc_filename(deterministic=False):
function run_stochkv_model (line 21) | def run_stochkv_model(deterministic=False):
function main (line 128) | def main():
FILE: examples/thalamocortical-cell/CellEvalSetup/evaluator.py
function read_step_protocol (line 41) | def read_step_protocol(protocol_name,
function read_ramp_protocol (line 73) | def read_ramp_protocol(
function define_protocols (line 108) | def define_protocols(protocols_filename, stochkv_det=None,
class eFELFeatureExtra (line 157) | class eFELFeatureExtra(eFELFeature):
method __init__ (line 165) | def __init__(
method get_bpo_score (line 211) | def get_bpo_score(self, responses):
method calculate_feature (line 221) | def calculate_feature(self, responses, raise_warnings=False):
method calculate_score (line 251) | def calculate_score(self, responses, trace_check=False):
class SingletonWeightObjective (line 289) | class SingletonWeightObjective(EFeatureObjective):
method __init__ (line 293) | def __init__(self, name, feature, weight):
method calculate_score (line 304) | def calculate_score(self, responses):
method __str__ (line 309) | def __str__(self):
function define_fitness_calculator (line 315) | def define_fitness_calculator(main_protocol, features_filename, prefix=""):
function create (line 406) | def create(etype, runopt=False, altmorph=None):
FILE: examples/thalamocortical-cell/CellEvalSetup/protocols.py
class StepProtocolCustom (line 43) | class StepProtocolCustom(ephys.protocols.StepProtocol):
method __init__ (line 47) | def __init__(
method run (line 64) | def run(self, cell_model, param_values, sim=None, isolate=None):
class RampProtocol (line 83) | class RampProtocol(ephys.protocols.SweepProtocol):
method __init__ (line 87) | def __init__(
method step_delay (line 116) | def step_delay(self):
method step_duration (line 121) | def step_duration(self):
FILE: examples/thalamocortical-cell/CellEvalSetup/template.py
function multi_locations (line 38) | def multi_locations(sectionlist):
function define_mechanisms (line 76) | def define_mechanisms(params_filename):
function define_parameters (line 100) | def define_parameters(params_filename):
function define_morphology (line 187) | def define_morphology(morphology_filename, do_set_nseg=1e9):
function create (line 197) | def create(recipe, etype, altmorph=None):
FILE: examples/thalamocortical-cell/CellEvalSetup/tools.py
function rename_prot (line 1) | def rename_prot(name):
function rename_featpart (line 34) | def rename_featpart(name):
function rename_feat (line 63) | def rename_feat(name, sep = " "):
FILE: examples/tsodyksmarkramstp/tmevaluator.py
class TsodyksMarkramEvaluator (line 31) | class TsodyksMarkramEvaluator(bpop.evaluators.Evaluator):
method __init__ (line 32) | def __init__(self, t, v, tstim, params):
method generate_model (line 70) | def generate_model(self, individual):
method evaluate_with_lists (line 78) | def evaluate_with_lists(self, individual):
method init_simulator_and_evaluate_with_lists (line 86) | def init_simulator_and_evaluate_with_lists(self, individual):
FILE: examples/tsodyksmarkramstp/tmevaluator_multiplefreqs.py
class TsodyksMarkramEvaluator (line 31) | class TsodyksMarkramEvaluator(bpop.evaluators.Evaluator):
method __init__ (line 32) | def __init__(self, data, params):
method generate_model (line 58) | def generate_model(self, freq, individual):
method evaluate_with_lists (line 64) | def evaluate_with_lists(self, individual):
method init_simulator_and_evaluate_with_lists (line 73) | def init_simulator_and_evaluate_with_lists(self, individual):
FILE: examples/tsodyksmarkramstp/tmodeint.py
function integrate (line 35) | def integrate(sampstim, nsamples, dt, vRest, Trec, Tfac,
FILE: examples/tsodyksmarkramstp/tmodesolve.py
function solve_TM (line 26) | def solve_TM(t_stims, USE, Trec, Tfac, ASE):
FILE: misc/github_wiki/creates_publication_list_markdown.py
class Style (line 15) | class Style(OriginalStyle):
method format_title (line 19) | def format_title(self, e, which_field, as_sentence=True):
function put_bullet_points (line 30) | def put_bullet_points(input):
Copy disabled (too large)
Download .json
Condensed preview — 363 files, each showing path, character count, and a content snippet. Download the .json file for the full structured content (11,946K chars).
[
{
"path": ".coveragerc",
"chars": 81,
"preview": "[run]\nomit = */tests/*,bluepyopt/_version.py\n[report]\nomit=bluepyopt/_version.py\n"
},
{
"path": ".gitattributes",
"chars": 35,
"preview": "bluepyopt/_version.py export-subst\n"
},
{
"path": ".github/workflows/build.yml",
"chars": 1681,
"preview": "name: Build\n\non:\n push:\n branches:\n - master\n tags:\n - '[0-9]+.[0-9]+.[0-9]+'\n\njobs:\n call-test-workfl"
},
{
"path": ".github/workflows/keep-alive.yml",
"chars": 1020,
"preview": "name: Keep-alive\n\non:\n schedule:\n # Runs every sunday at 3 a.m.\n - cron: '0 3 * * SUN'\n\njobs:\n call-test-workflo"
},
{
"path": ".github/workflows/mirror-ebrains.yml",
"chars": 766,
"preview": "name: Mirror to Ebrains\n\non:\n push:\n branches: [ master ]\n\njobs:\n to_ebrains:\n runs-on: ubuntu-latest\n steps:"
},
{
"path": ".github/workflows/test.yml",
"chars": 973,
"preview": "name: Test\n\non:\n pull_request:\n # allows this workflow to be reusable (e.g. by the build workflow)\n workflow_call:\n\nj"
},
{
"path": ".gitignore",
"chars": 149,
"preview": "*.pyc\n*.swp\nx86_64\n/bluepyopt.egg-info/\n/build/\n/dist/\n.DS_Store\n/.tox\n.ipynb_checkpoints\n/.python-version\n/cov_reports\n"
},
{
"path": ".readthedocs.yaml",
"chars": 372,
"preview": "# .readthedocs.yml\n# Read the Docs configuration file\n# See https://docs.readthedocs.io/en/stable/config-file/v2.html fo"
},
{
"path": ".zenodo.json",
"chars": 1857,
"preview": "{\n \"title\" : \"BluePyOpt\",\n \"license\": \"LGPL-3.0\",\n \"upload_type\": \"software\",\n \"description\": \"The Blue Brain Python"
},
{
"path": "AUTHORS.txt",
"chars": 202,
"preview": "Werner Van Geit @ BBP\nChristian Roessert @ BBP\nMike Gevaert @ BBP\nJean-Denis Courcol @ BBP\nGiuseppe Chindemi @ BBP\nTangu"
},
{
"path": "COPYING",
"chars": 35151,
"preview": " GNU GENERAL PUBLIC LICENSE\n Version 3, 29 June 2007\n\n Copyright (C) 2007 Free "
},
{
"path": "COPYING.lesser",
"chars": 7651,
"preview": " GNU LESSER GENERAL PUBLIC LICENSE\n Version 3, 29 June 2007\n\n Copyright (C) 2007"
},
{
"path": "Dockerfile",
"chars": 1248,
"preview": "# Copyright (c) 2016-2022, EPFL/Blue Brain Project\n#\n# This file is part of BluePyOpt <https://github.com/BlueBrain/Blue"
},
{
"path": "LICENSE.txt",
"chars": 1059,
"preview": "BluePyOpt - Bluebrain Python Optimisation Library\n\nBluePyOpt is licensed under the LGPL, unless noted otherwise, e.g., f"
},
{
"path": "MANIFEST.in",
"chars": 461,
"preview": "include versioneer.py\ninclude bluepyopt/_version.py\ninclude bluepyopt/ephys/static/arbor_mechanisms.json\ninclude bluepyo"
},
{
"path": "Makefile",
"chars": 4392,
"preview": "TEST_REQUIREMENTS=nose coverage mock\n\nall: install\ninstall:\n\tpip install -q . --upgrade\ndoc: install\n\tpip install -q sph"
},
{
"path": "README.rst",
"chars": 10666,
"preview": ".. warning::\n The Blue Brain Project concluded in December 2024, so development has ceased under the BlueBrain GitHub "
},
{
"path": "bluepyopt/__init__.py",
"chars": 1701,
"preview": "\"\"\"Init script\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://github"
},
{
"path": "bluepyopt/api.py",
"chars": 963,
"preview": "\"\"\"Common API functionality\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <h"
},
{
"path": "bluepyopt/deapext/CMA_MO.py",
"chars": 8975,
"preview": "\"\"\"Multi Objective CMA-es class\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n\n This file is part of BluePyOp"
},
{
"path": "bluepyopt/deapext/CMA_SO.py",
"chars": 7234,
"preview": "\"\"\"Single Objective CMA-es class\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n\n This file is part of BluePyO"
},
{
"path": "bluepyopt/deapext/__init__.py",
"chars": 18,
"preview": "\"\"\"Init script\"\"\"\n"
},
{
"path": "bluepyopt/deapext/algorithms.py",
"chars": 6691,
"preview": "\"\"\"Optimisation class\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https:/"
},
{
"path": "bluepyopt/deapext/hype.py",
"chars": 3725,
"preview": "\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://github.com/BlueBrain/Blue"
},
{
"path": "bluepyopt/deapext/optimisations.py",
"chars": 10603,
"preview": "\"\"\"Optimisation class\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https:/"
},
{
"path": "bluepyopt/deapext/optimisationsCMA.py",
"chars": 13571,
"preview": "\"\"\"CMA Optimisation class\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <htt"
},
{
"path": "bluepyopt/deapext/stoppingCriteria.py",
"chars": 9973,
"preview": "\"\"\"StoppingCriteria class\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <htt"
},
{
"path": "bluepyopt/deapext/tools/__init__.py",
"chars": 43,
"preview": "\"\"\"Init\"\"\"\n\nfrom .selIBEA import * # NOQA\n"
},
{
"path": "bluepyopt/deapext/tools/selIBEA.py",
"chars": 4681,
"preview": "\"\"\"IBEA selector\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://gith"
},
{
"path": "bluepyopt/deapext/utils.py",
"chars": 4409,
"preview": "\"\"\"Utils function\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://git"
},
{
"path": "bluepyopt/ephys/__init__.py",
"chars": 1515,
"preview": "\"\"\"Init script\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://github"
},
{
"path": "bluepyopt/ephys/acc.py",
"chars": 1680,
"preview": "'''Dependencies of Arbor simulator backend'''\n\ntry:\n import arbor\nexcept ImportError as e:\n class arbor:\n d"
},
{
"path": "bluepyopt/ephys/base.py",
"chars": 341,
"preview": "'''Base class for ephys classes'''\n\n\nclass BaseEPhys(object):\n '''Base class for ephys classes'''\n\n def __init__(s"
},
{
"path": "bluepyopt/ephys/create_acc.py",
"chars": 33026,
"preview": "\"\"\"create JSON/ACC files for Arbor from a set of BluePyOpt.ephys parameters\"\"\"\n\n# pylint: disable=R0914\n\nimport io\nimpor"
},
{
"path": "bluepyopt/ephys/create_hoc.py",
"chars": 14131,
"preview": "'''create a hoc file from a set of BluePyOpt.ephys parameters'''\n\n# pylint: disable=R0914\n\nimport os\nimport re\n\nfrom col"
},
{
"path": "bluepyopt/ephys/efeatures.py",
"chars": 22370,
"preview": "\"\"\"eFeature classes\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://g"
},
{
"path": "bluepyopt/ephys/evaluators.py",
"chars": 8693,
"preview": "\"\"\"Cell evaluator class\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https"
},
{
"path": "bluepyopt/ephys/examples/__init__.py",
"chars": 45,
"preview": "\"\"\"Init\"\"\"\n\nfrom . import simplecell # NOQA\n"
},
{
"path": "bluepyopt/ephys/examples/simplecell/__init__.py",
"chars": 46,
"preview": "\"\"\"Init\"\"\"\n\nfrom .simplecell import * # NOQA\n"
},
{
"path": "bluepyopt/ephys/examples/simplecell/simple.swc",
"chars": 100,
"preview": "# Dummy granule cell morphology\n1 1 -5.0 0.0 0.0 5.0 -1\n2 1 0.0 0.0 0.0 5.0 1\n3 1 5.0 0.0 0.0 5.0 2\n"
},
{
"path": "bluepyopt/ephys/examples/simplecell/simplecell.py",
"chars": 3563,
"preview": "\"\"\"Simple cell test model\"\"\"\n\nimport os\n\nimport bluepyopt.ephys as ephys\n\n\nclass SimpleCell:\n def __init__(self):\n "
},
{
"path": "bluepyopt/ephys/extra_features_utils.py",
"chars": 15434,
"preview": "\"\"\"Extra features functions\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <h"
},
{
"path": "bluepyopt/ephys/locations.py",
"chars": 20467,
"preview": "\"\"\"Location classes\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://g"
},
{
"path": "bluepyopt/ephys/mechanisms.py",
"chars": 10118,
"preview": "\"\"\"\nMechanism classes\n\nTheses classes represent mechanisms in the model\n\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brai"
},
{
"path": "bluepyopt/ephys/models.py",
"chars": 29777,
"preview": "\"\"\"Cell template class\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https:"
},
{
"path": "bluepyopt/ephys/morphologies.py",
"chars": 16096,
"preview": "\"\"\"Morphology classes\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https:/"
},
{
"path": "bluepyopt/ephys/objectives.py",
"chars": 4392,
"preview": "\"\"\"Objective classes\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://"
},
{
"path": "bluepyopt/ephys/objectivescalculators.py",
"chars": 1742,
"preview": "\"\"\"Score calculator classes\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <h"
},
{
"path": "bluepyopt/ephys/parameters.py",
"chars": 13550,
"preview": "\"\"\"Parameter classes\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://"
},
{
"path": "bluepyopt/ephys/parameterscalers/__init__.py",
"chars": 32,
"preview": "from .parameterscalers import *\n"
},
{
"path": "bluepyopt/ephys/parameterscalers/acc_iexpr.py",
"chars": 9619,
"preview": "\"\"\"Translate spatially varying parameter-scaler expressions to Arbor iexprs\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue B"
},
{
"path": "bluepyopt/ephys/parameterscalers/parameterscalers.py",
"chars": 11624,
"preview": "\"\"\"Parameter scaler classes\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <h"
},
{
"path": "bluepyopt/ephys/protocols.py",
"chars": 24385,
"preview": "\"\"\"Protocol classes\"\"\"\nfrom .recordings import LFPRecording\nfrom .simulators import LFPySimulator\nfrom .stimuli import L"
},
{
"path": "bluepyopt/ephys/recordings.py",
"chars": 4673,
"preview": "\"\"\"Recording classes\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://"
},
{
"path": "bluepyopt/ephys/responses.py",
"chars": 2861,
"preview": "\"\"\"Responses classes\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://"
},
{
"path": "bluepyopt/ephys/serializer.py",
"chars": 2390,
"preview": "'''Mixin class to make dictionaries'''\n\n# Disabling lines below, generate error when loading ephys.examples\n# from futur"
},
{
"path": "bluepyopt/ephys/simulators.py",
"chars": 12145,
"preview": "\"\"\"Simulator classes\"\"\"\n\n# pylint: disable=W0511\n\nimport ctypes\nimport importlib.util\nimport logging\nimport os\nimport pa"
},
{
"path": "bluepyopt/ephys/static/arbor_mechanisms.json",
"chars": 5847,
"preview": "{\n \"allen\": {\n \"CaDynamics\": {\n \"globals\": [\n \"F\"\n ],\n \"ranges"
},
{
"path": "bluepyopt/ephys/stimuli.py",
"chars": 14993,
"preview": "\"\"\"Stimuli classes\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://gi"
},
{
"path": "bluepyopt/ephys/templates/acc/_json_template.jinja2",
"chars": 707,
"preview": "{ \n \"cell_model_name\": \"{{template_name}}\",\n {%- if banner %}\n \"produced_by\": \"{{banner}}\",\n {%- endif %}\n {%- if m"
},
{
"path": "bluepyopt/ephys/templates/acc/decor_acc_template.jinja2",
"chars": 1953,
"preview": "(arbor-component\n (meta-data (version \"0.9-dev\"))\n (decor\n {%- for mech, params in global_mechs.items() %}\n {%- "
},
{
"path": "bluepyopt/ephys/templates/acc/label_dict_acc_template.jinja2",
"chars": 150,
"preview": "(arbor-component\n (meta-data (version \"0.9-dev\"))\n (label-dict\n {%- for loc, label in label_dict.items() %}\n {{ la"
},
{
"path": "bluepyopt/ephys/templates/cell_template.jinja2",
"chars": 5125,
"preview": "/* \n{%- if banner %}\n{{banner}} \n{%- endif %}\n*/\n{load_file(\"stdrun.hoc\")}\n{load_file(\"import3d.hoc\")}\n\n{%- if global_pa"
},
{
"path": "bluepyopt/evaluators.py",
"chars": 2341,
"preview": "\"\"\"Cell evaluator class\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https"
},
{
"path": "bluepyopt/ipyp/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "bluepyopt/ipyp/bpopt_tasksdb.py",
"chars": 4534,
"preview": "\"\"\"Get stats out of ipyparallel's tasks.db\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part "
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/Ca.channel.nml",
"chars": 2523,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" xmlns:xsi=\"http://ww"
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/Ca_HVA.channel.nml",
"chars": 2475,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" xmlns:xsi=\"http://ww"
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/Ca_LVAst.channel.nml",
"chars": 3662,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" xmlns:xsi=\"http://ww"
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/Ih.channel.nml",
"chars": 1795,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" xmlns:xsi=\"http://ww"
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/Im.channel.nml",
"chars": 2363,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" xmlns:xsi=\"http://ww"
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/K_Pst.channel.nml",
"chars": 3970,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" xmlns:xsi=\"http://ww"
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/K_Tst.channel.nml",
"chars": 3749,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" xmlns:xsi=\"http://ww"
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/KdShu2007.channel.nml",
"chars": 2087,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" \n xmlns:xsi="
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/NaTa_t.channel.nml",
"chars": 2572,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" xmlns:xsi=\"http://ww"
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/NaTs2_t.channel.nml",
"chars": 2597,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" xmlns:xsi=\"http://ww"
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/Nap_Et2.channel.nml",
"chars": 4115,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" xmlns:xsi=\"http://ww"
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/SK_E2.channel.nml",
"chars": 3409,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" xmlns:xsi=\"http://ww"
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/SKv3_1.channel.nml",
"chars": 2714,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" xmlns:xsi=\"http://ww"
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/StochKv_deterministic.channel.nml",
"chars": 1844,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" xmlns:xsi=\"http://ww"
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/baseCaDynamics_E2_NML2.nml",
"chars": 8951,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" \n xmlns:xsi="
},
{
"path": "bluepyopt/neuroml/NeuroML2_mechanisms/pas.channel.nml",
"chars": 717,
"preview": "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<neuroml xmlns=\"http://www.neuroml.org/schema/neuroml2\" xmlns:xsi=\"http://ww"
},
{
"path": "bluepyopt/neuroml/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "bluepyopt/neuroml/biophys.py",
"chars": 20726,
"preview": "\"\"\"Functions to create neuroml biophysiology\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n This file is part"
},
{
"path": "bluepyopt/neuroml/cell.py",
"chars": 3262,
"preview": "\"\"\"Functions to create neuroml cell\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n This file is part of BlueP"
},
{
"path": "bluepyopt/neuroml/morphology.py",
"chars": 5644,
"preview": "\"\"\"Functions to create neuroml morphology\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n This file is part of"
},
{
"path": "bluepyopt/neuroml/simulation.py",
"chars": 2693,
"preview": "\"\"\"Functions to create neuroml simulation\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n This file is part o"
},
{
"path": "bluepyopt/objectives.py",
"chars": 1008,
"preview": "\"\"\"Objective classes\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://"
},
{
"path": "bluepyopt/optimisations.py",
"chars": 999,
"preview": "\"\"\"Optimisation class\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https:/"
},
{
"path": "bluepyopt/parameters.py",
"chars": 4897,
"preview": "\"\"\"Parameter classes\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://"
},
{
"path": "bluepyopt/stoppingCriteria.py",
"chars": 1127,
"preview": "\"\"\"Stopping Criteria class\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n This file is part of BluePyOpt <htt"
},
{
"path": "bluepyopt/tests/.gitignore",
"chars": 41,
"preview": "/.coverage\n/coverage.xml\n/coverage_html/\n"
},
{
"path": "bluepyopt/tests/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "bluepyopt/tests/disable_simplecell_scoop.py",
"chars": 5513,
"preview": "'''\nNote: this is a bizarre test due to the fact that scoop can't be started from\nwithin python:\nhttps://github.com/sora"
},
{
"path": "bluepyopt/tests/expected_results.json",
"chars": 1994,
"preview": "{\n \"TestL5PCEvaluator.test_eval\": {\n \"bAP.soma.AP_width\": 1.9999999999995453,\n \"bAP.soma.AP_height\": 2."
},
{
"path": "bluepyopt/tests/test_bluepyopt.py",
"chars": 991,
"preview": "\"\"\"Tests of the main bluepyopt module\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of Bl"
},
{
"path": "bluepyopt/tests/test_deapext/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "bluepyopt/tests/test_deapext/deapext_test_utils.py",
"chars": 1381,
"preview": "import random\nimport numpy as np\n\nfrom deap import base\nfrom deap import creator\n\n\ndef make_mock_population(features_cou"
},
{
"path": "bluepyopt/tests/test_deapext/test_algorithms.py",
"chars": 4152,
"preview": "\"\"\"bluepyopt.optimisations tests\"\"\"\n\nimport numpy\nfrom unittest import mock\n\nimport deap.creator\nimport deap.benchmarks\n"
},
{
"path": "bluepyopt/tests/test_deapext/test_hype.py",
"chars": 899,
"preview": "\"\"\"bluepyopt.deapext.hype tests\"\"\"\n\nimport numpy\n\nimport bluepyopt.deapext.hype\n\nimport pytest\n\n\n@pytest.mark.unit\ndef t"
},
{
"path": "bluepyopt/tests/test_deapext/test_optimisations.py",
"chars": 3500,
"preview": "\"\"\"bluepyopt.optimisations tests\"\"\"\n\n\nimport bluepyopt.optimisations\nimport bluepyopt.ephys.examples.simplecell\n\nimport "
},
{
"path": "bluepyopt/tests/test_deapext/test_optimisationsCMA.py",
"chars": 2281,
"preview": "\"\"\"bluepyopt.optimisationsCMA tests\"\"\"\n\nimport pytest\nimport bluepyopt\nimport bluepyopt.ephys.examples.simplecell\n\n\n@pyt"
},
{
"path": "bluepyopt/tests/test_deapext/test_selIBEA.py",
"chars": 2126,
"preview": "\"\"\"selIBEA tests\"\"\"\n\n\nimport deap\nimport numpy\n\nimport bluepyopt.deapext\nfrom bluepyopt.deapext.tools.selIBEA \\\n impo"
},
{
"path": "bluepyopt/tests/test_deapext/test_stoppingCriteria.py",
"chars": 491,
"preview": "\"\"\"bluepyopt.stoppingCriteria tests\"\"\"\n\n\nimport bluepyopt.stoppingCriteria\n\nimport pytest\n\n\n@pytest.mark.unit\ndef test_M"
},
{
"path": "bluepyopt/tests/test_deapext/test_utils.py",
"chars": 871,
"preview": "\"\"\"bluepyopt.utils tests\"\"\"\n\nimport multiprocessing\nimport time\n\nimport bluepyopt.deapext.utils as utils\nimport pytest\n\n"
},
{
"path": "bluepyopt/tests/test_ephys/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "bluepyopt/tests/test_ephys/test_acc.py",
"chars": 1262,
"preview": "\"\"\"Unit tests for acc.\"\"\"\n\nfrom bluepyopt.ephys.acc import arbor, ArbLabel\n\n\nimport pytest\n\n\n@pytest.mark.unit\ndef test_"
},
{
"path": "bluepyopt/tests/test_ephys/test_create_acc.py",
"chars": 26655,
"preview": "\"\"\"Tests for create_acc.py\"\"\"\n\n# pylint: disable=W0212\n\nimport json\nimport os\nimport pathlib\nimport re\nimport sys\nimport"
},
{
"path": "bluepyopt/tests/test_ephys/test_create_hoc.py",
"chars": 6000,
"preview": "\"\"\"Tests for create_hoc.py\"\"\"\n\n# pylint: disable=W0212\n\nimport os\n\nfrom bluepyopt.ephys.acc import ArbLabel\nfrom bluepyo"
},
{
"path": "bluepyopt/tests/test_ephys/test_evaluators.py",
"chars": 3771,
"preview": "\"\"\"Test ephys model objects\"\"\"\n\n# pylint: disable=R0914\n\nimport os\n\n\nimport pytest\nimport numpy\n\nfrom bluepyopt import e"
},
{
"path": "bluepyopt/tests/test_ephys/test_extra_features_utils.py",
"chars": 3983,
"preview": "\"\"\"Tests for ephys.extra_features_utils\"\"\"\n\nimport os\n\nimport numpy\nimport pytest\n\nfrom bluepyopt import ephys\n\n\ntestdat"
},
{
"path": "bluepyopt/tests/test_ephys/test_features.py",
"chars": 13292,
"preview": "\"\"\"Tests for ephys.efeatures\"\"\"\n\nimport os\nfrom os.path import join as joinp\n\nimport pytest\nimport numpy\n\nfrom bluepyopt"
},
{
"path": "bluepyopt/tests/test_ephys/test_init.py",
"chars": 1212,
"preview": "\"\"\"bluepy.ephys test\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt <https://"
},
{
"path": "bluepyopt/tests/test_ephys/test_locations.py",
"chars": 12122,
"preview": "\"\"\"bluepyopt.ephys.simulators tests\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2022, EPFL/Blue Brain Project\n\n This file is part of Blue"
},
{
"path": "bluepyopt/tests/test_ephys/test_mechanisms.py",
"chars": 5783,
"preview": "\"\"\"Tests for ephys.mechanisms\"\"\"\n\nimport string\nimport random\nimport json\nimport difflib\n\n\nimport pytest\n\nfrom . import "
},
{
"path": "bluepyopt/tests/test_ephys/test_models.py",
"chars": 11304,
"preview": "\"\"\"Test ephys model objects\"\"\"\n\nimport os\nimport tempfile\nimport contextlib\n\n\nimport pytest\nimport numpy\n\nfrom bluepyopt"
},
{
"path": "bluepyopt/tests/test_ephys/test_morphologies.py",
"chars": 3759,
"preview": "\"\"\"ephys/morphologies.py unit tests\"\"\"\n\nimport json\nimport os\n\n\nimport pytest\n\nimport bluepyopt.ephys as ephys\nfrom blue"
},
{
"path": "bluepyopt/tests/test_ephys/test_objectives.py",
"chars": 7622,
"preview": "\"\"\"Tests for ephys.efeatures\"\"\"\n\nimport os\n\n\nimport pytest\nimport numpy\n\nimport bluepyopt.ephys as ephys\n\n\n@pytest.mark."
},
{
"path": "bluepyopt/tests/test_ephys/test_parameters.py",
"chars": 2137,
"preview": "\"\"\"ephys.parameters tests\"\"\"\n\nimport json\n\n\nimport pytest\nimport numpy\n\nfrom . import utils\nfrom bluepyopt import ephys\n"
},
{
"path": "bluepyopt/tests/test_ephys/test_parameterscalers.py",
"chars": 6158,
"preview": "\"\"\"Test ephys.parameterscalers\"\"\"\n\nimport json\nimport pathlib\nimport tempfile\nimport arbor\n\nimport pytest\n\n\nfrom bluepyo"
},
{
"path": "bluepyopt/tests/test_ephys/test_protocols.py",
"chars": 13886,
"preview": "\"\"\"bluepyopt.ephys.simulators tests\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of Blue"
},
{
"path": "bluepyopt/tests/test_ephys/test_recordings.py",
"chars": 3294,
"preview": "\"\"\"bluepyopt.ephys.simulators tests\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of Blue"
},
{
"path": "bluepyopt/tests/test_ephys/test_serializer.py",
"chars": 2282,
"preview": "\"\"\"Test for ephys.serializer\"\"\"\n\nimport json\n\nimport pytest\nimport numpy\n\nimport bluepyopt.ephys as ephys\n\n\nclass Classf"
},
{
"path": "bluepyopt/tests/test_ephys/test_simulators.py",
"chars": 7780,
"preview": "\"\"\"bluepyopt.ephys.simulators tests\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of Blue"
},
{
"path": "bluepyopt/tests/test_ephys/test_stimuli.py",
"chars": 8346,
"preview": "\"\"\"bluepyopt.ephys.simulators tests\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of Blue"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/TimeVoltageResponse.csv",
"chars": 202089,
"preview": ",time,voltage\n0,0.0,-65.0\n1,0.000664036278079,-65.0020897598\n2,0.00229531725338,-65.0063371218\n3,0.00606004223107,-65.01"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/CCell/CCell.json",
"chars": 236,
"preview": "{ \n \"cell_model_name\": \"CCell\",\n \"produced_by\": \"Created by BluePyOpt(1.12.62) at 2022-07-28 17:15:28.166082\", \n \"mor"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/CCell/CCell_decor.acc",
"chars": 487,
"preview": "(arbor-component\n (meta-data (version \"0.9-dev\"))\n (decor\n (default (gSKv3_1bar_SKv3_1 65 (scalar 1.0)))\n (paint"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/CCell/CCell_label_dict.acc",
"chars": 263,
"preview": "(arbor-component\n (meta-data (version \"0.9-dev\"))\n (label-dict \n (region-def \"all\" (all)) \n (region-def \"apic\" ("
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/CCell/simple_axon_replacement.acc",
"chars": 361,
"preview": "(arbor-component \n (meta-data \n (version \"0.9-dev\"))\n (morphology \n (branch 0 -1 \n (segment 0 \n (poi"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/expsyn/simple.swc",
"chars": 100,
"preview": "# Dummy granule cell morphology\n1 1 -5.0 0.0 0.0 5.0 -1\n2 1 0.0 0.0 0.0 5.0 1\n3 1 5.0 0.0 0.0 5.0 2\n"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/expsyn/simple_cell.json",
"chars": 257,
"preview": "{ \n \"cell_model_name\": \"simple_cell\",\n \"produced_by\": \"Created by BluePyOpt(1.12.113) at 2022-11-07 01:06:09.370611\", "
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/expsyn/simple_cell_decor.acc",
"chars": 302,
"preview": "(arbor-component\n (meta-data (version \"0.9-dev\"))\n (decor\n (paint (region \"soma\") (membrane-capacitance 0.01 (scala"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/expsyn/simple_cell_label_dict.acc",
"chars": 304,
"preview": "(arbor-component\n (meta-data (version \"0.9-dev\"))\n (label-dict\n (region-def \"all\" (all))\n (region-def \"soma\" (ta"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/l5pc/C060114A7.asc",
"chars": 946254,
"preview": ";\tV3 text file written for MicroBrightField products.\r\n(ImageCoords)\r\n\r\n(\"CellBody\"\r\n (Color RGB (255, 255, 128))\r\n (C"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/l5pc/C060114A7_axon_replacement.acc",
"chars": 660,
"preview": "(arbor-component \n (meta-data \n (version \"0.9-dev\"))\n (morphology \n (branch 0 -1 \n (segment 0 \n (poi"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/l5pc/C060114A7_modified.acc",
"chars": 792629,
"preview": "(arbor-component \n (meta-data \n (version \"0.9-dev\"))\n (morphology \n (branch 0 -1 \n (segment 0 \n (poi"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/l5pc/l5pc.json",
"chars": 337,
"preview": "{ \n \"cell_model_name\": \"l5pc\",\n \"produced_by\": \"Created by BluePyOpt(1.12.113) at 2022-11-06 18:21:20.822883\", \n \"mor"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/l5pc/l5pc_decor.acc",
"chars": 3292,
"preview": "(arbor-component\n (meta-data (version \"0.9-dev\"))\n (decor\n (default (membrane-potential -65 (scalar 1.0)))\n (def"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/l5pc/l5pc_label_dict.acc",
"chars": 257,
"preview": "(arbor-component\n (meta-data (version \"0.9-dev\"))\n (label-dict\n (region-def \"all\" (all))\n (region-def \"soma\" (ta"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/l5pc_py37/l5pc_decor.acc",
"chars": 3310,
"preview": "(arbor-component\n (meta-data (version \"0.9-dev\"))\n (decor\n (default (membrane-potential -65 (scalar 1.0)))\n (def"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/simplecell/simple.swc",
"chars": 100,
"preview": "# Dummy granule cell morphology\n1 1 -5.0 0.0 0.0 5.0 -1\n2 1 0.0 0.0 0.0 5.0 1\n3 1 5.0 0.0 0.0 5.0 2\n"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/simplecell/simple_axon_replacement.acc",
"chars": 631,
"preview": "(arbor-component \n (meta-data \n (version \"0.9-dev\"))\n (morphology \n (branch 0 -1 \n (segment 0 \n (poi"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/simplecell/simple_cell.json",
"chars": 349,
"preview": "{ \n \"cell_model_name\": \"simple_cell\",\n \"produced_by\": \"Created by BluePyOpt(1.12.113) at 2022-11-06 18:29:03.845296\", "
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/simplecell/simple_cell_decor.acc",
"chars": 256,
"preview": "(arbor-component\n (meta-data (version \"0.9-dev\"))\n (decor\n (paint (region \"soma\") (membrane-capacitance 0.01 (scala"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/simplecell/simple_cell_label_dict.acc",
"chars": 257,
"preview": "(arbor-component\n (meta-data (version \"0.9-dev\"))\n (label-dict\n (region-def \"all\" (all))\n (region-def \"soma\" (ta"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/simplecell/simple_modified.acc",
"chars": 917,
"preview": "(arbor-component \n (meta-data \n (version \"0.9-dev\"))\n (morphology \n (branch 0 -1 \n (segment 0 \n (poi"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/templates/cell_json_template.jinja2",
"chars": 678,
"preview": "{ \n \"cell_model_name\": \"{{template_name}}\",\n {%- if banner %}\n \"produced_by\": \"{{banner}} (from {{ custom_param }})\","
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/templates/decor_acc_template.jinja2",
"chars": 1697,
"preview": "(arbor-component\n (meta-data (version \"0.9-dev\"))\n (meta-data (info \"test-decor\"))\n (decor\n {%- for mech, params i"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/acc/templates/label_dict_acc_template.jinja2",
"chars": 212,
"preview": "(arbor-component\n (meta-data (version \"0.9-dev\"))\n (meta-data (info \"test-label-dict\"))\n (label-dict\n {%- for loc, l"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/apic.swc",
"chars": 137,
"preview": "# Dummy cell morphology\n1 1 -5.0 0.0 0.0 5.0 -1\n2 1 0.0 0.0 0.0 5.0 1\n3 1 5.0 0.0 0.0 5.0 2\n4 4 5.0 0.0 0.0 1.0 3\n5 4 20"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/simple.swc",
"chars": 100,
"preview": "# Dummy granule cell morphology\n1 1 -5.0 0.0 0.0 5.0 -1\n2 1 0.0 0.0 0.0 5.0 1\n3 1 5.0 0.0 0.0 5.0 2\n"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/simple.wrong",
"chars": 0,
"preview": ""
},
{
"path": "bluepyopt/tests/test_ephys/testdata/simple_ax1.swc",
"chars": 145,
"preview": "# Dummy granule cell morphology\n1 1 -5.0 0.0 0.0 5.0 -1\n2 1 0.0 0.0 0.0 5.0 1\n3 1 5.0 0.0 0.0 5.0 2\n4 2 5.0 0.0 0.0 1.0 "
},
{
"path": "bluepyopt/tests/test_ephys/testdata/simple_ax2.asc",
"chars": 387,
"preview": ";\tV3 text file written for MicroBrightField products.\n\n(\"CellBody\"\n (CellBody)\n ( -5 0 0 0) ; 1, 1\n ( 0"
},
{
"path": "bluepyopt/tests/test_ephys/testdata/simple_ax2.swc",
"chars": 169,
"preview": "# Dummy granule cell morphology\n1 1 -5.0 0.0 0.0 5.0 -1\n2 1 0.0 0.0 0.0 5.0 1\n3 1 5.0 0.0 0.0 5.0 2\n4 2 5.0 0.0 0.0 1.0 "
},
{
"path": "bluepyopt/tests/test_ephys/testdata/test.jinja2",
"chars": 4835,
"preview": "/* \nTest template\n\n{%- if banner %}\n{{banner}} \n{%- endif %}\n*/\n{load_file(\"stdrun.hoc\")}\n{load_file(\"import3d.hoc\")}\n\n{"
},
{
"path": "bluepyopt/tests/test_ephys/testmodels/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "bluepyopt/tests/test_ephys/testmodels/dummycells.py",
"chars": 3265,
"preview": "\"\"\"Dummy cell model used for testing\"\"\"\n\nimport bluepyopt.ephys as ephys\n\n\nclass DummyCellModel1(ephys.models.Model):\n\n "
},
{
"path": "bluepyopt/tests/test_ephys/utils.py",
"chars": 1496,
"preview": "\"\"\"EPhys test utils\"\"\"\n\nfrom bluepyopt import ephys\nfrom bluepyopt.ephys.parameters import (\n NrnGlobalParameter, Nrn"
},
{
"path": "bluepyopt/tests/test_evaluators.py",
"chars": 1101,
"preview": "\"\"\"bluepyopt.evaluators tests\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt "
},
{
"path": "bluepyopt/tests/test_l5pc.py",
"chars": 6322,
"preview": "\"\"\"Test l5pc example\"\"\"\n\nimport json\nimport os\nimport sys\nfrom contextlib import contextmanager\n\nimport bluepyopt\nfrom b"
},
{
"path": "bluepyopt/tests/test_lfpy.py",
"chars": 989,
"preview": "\"\"\"Functional LFPy test\"\"\"\n\nimport os\nimport sys\n\nimport pytest\n\nL5PC_LFPY_PATH = os.path.abspath(\n os.path.join(os.p"
},
{
"path": "bluepyopt/tests/test_neuroml_fcts.py",
"chars": 15707,
"preview": "\"\"\"Test neuroml functions\"\"\"\n\nimport os\nimport sys\n\nimport efel\nimport neuroml\nimport numpy\nimport pytest\nfrom pyneuroml"
},
{
"path": "bluepyopt/tests/test_parameters.py",
"chars": 4123,
"preview": "\"\"\"bluepyopt.parameters tests\"\"\"\n\n\"\"\"\nCopyright (c) 2016-2020, EPFL/Blue Brain Project\n\n This file is part of BluePyOpt "
},
{
"path": "bluepyopt/tests/test_simplecell.py",
"chars": 1968,
"preview": "\"\"\"Simple cell example test class\"\"\"\n\nimport os\nimport sys\n\nSIMPLECELL_PATH = os.path.abspath(\n os.path.join(os.path."
},
{
"path": "bluepyopt/tests/test_stochkv.py",
"chars": 3289,
"preview": "\"\"\"Test l5pc example\"\"\"\n\nimport os\nimport sys\nimport difflib\n\nSTOCHKV_PATH = os.path.abspath(os.path.join(os.path.dirnam"
},
{
"path": "bluepyopt/tests/test_tools.py",
"chars": 833,
"preview": "\"\"\"Test bluepyopt.tools\"\"\"\n\nimport pytest\n\n\n@pytest.mark.unit\ndef test_load():\n \"\"\"bluepyopt.tools: test import\"\"\"\n\n "
},
{
"path": "bluepyopt/tests/testdata/l5pc_validate_neuron_arbor/param_values.json",
"chars": 5852,
"preview": "[\n {\n \"gNaTs2_tbar_NaTs2_t.apical\": 0.024728378969164945,\n \"gSKv3_1bar_SKv3_1.apical\": 0.03798941330025"
},
{
"path": "bluepyopt/tools.py",
"chars": 218,
"preview": "\"\"\"BluePyOpt tools\"\"\"\n\nimport hashlib\n\n\ndef uint32_seed(string):\n \"\"\"Get unsigned int seed of a string\"\"\"\n\n hex_va"
},
{
"path": "cloud-config/README.md",
"chars": 5848,
"preview": "# Single Cell Optimization\n\n## Introduction\n\nThis documentation outlines how to setup distributed single cell optimizati"
},
{
"path": "cloud-config/config/amazon/README.md",
"chars": 4582,
"preview": "# Amazon Setup\n\nThis documents how an Amazon Web Services cluster can be created.\n\nNote: It is quite easy to run up a la"
},
{
"path": "cloud-config/config/amazon/ansible.cfg",
"chars": 167,
"preview": "[defaults]\ninventory = ./hosts\nprivate_key_file = single_cell_opt.pem\nremote_user = ubuntu\nhost_key_checking = False\n\n[s"
},
{
"path": "cloud-config/config/amazon/create_instance.yaml",
"chars": 2739,
"preview": "- name: Setup Ubuntu machines\n vars:\n region: us-west-2\n ami_image: ami-187c9978 # http://cloud-images.ubuntu.com"
},
{
"path": "cloud-config/config/amazon/gather_config.py",
"chars": 2931,
"preview": "#!/usr/bin/env python\nimport argparse\n\nimport boto3\nfrom jinja2 import Environment\n\n\nKEY_NAME = 'single_cell_opt.pem'\nHE"
},
{
"path": "cloud-config/config/amazon/site.yaml",
"chars": 649,
"preview": "---\n\n- name: Install Neuron Optimizer Framework Head\n hosts: neuron-optimizer-head\n sudo: true\n\n vars_files:\n - var"
},
{
"path": "cloud-config/config/amazon/vars.yaml",
"chars": 955,
"preview": "user_name: neuron\nworkspace: ~/workspace\nvenv: \"{{workspace}}/venv\"\nbuild_dir: \"{{workspace}}/build\"\ninstall_dir: \"{{wor"
},
{
"path": "cloud-config/config/cluster-user/README.md",
"chars": 1539,
"preview": "# Cluster User\n\nIf you are not the administrator of a cluster, but have access to one, this\ndocument outlines how to set"
},
{
"path": "cloud-config/config/cluster-user/ansible.cfg",
"chars": 31,
"preview": "[defaults]\ninventory = ./hosts\n"
},
{
"path": "cloud-config/config/cluster-user/hosts",
"chars": 31,
"preview": "[neuron-optimizer-worker]\nviz1\n"
},
{
"path": "cloud-config/config/cluster-user/site.yaml",
"chars": 301,
"preview": "---\n- name: Install Neuron Optimizer Framework Worker\n hosts: neuron-optimizer-worker\n\n vars_files:\n - vars.yaml\n\n "
},
{
"path": "cloud-config/config/cluster-user/vars.yaml",
"chars": 1009,
"preview": "#Note: you may need to change this\nuser_name: \"{{ ansible_user_id }}\"\nworkspace: ~/workspace\nvenv: \"{{workspace}}/venv\"\n"
},
{
"path": "cloud-config/config/vagrant/README.md",
"chars": 872,
"preview": "# Vagrant Setup\n\n 1. Install vagrant\n\n 2. Get the trusty64 box:\n\n `$ vagrant box add ubuntu/trusty64`\n\n 3. Use the in"
},
{
"path": "cloud-config/config/vagrant/Vagrantfile",
"chars": 879,
"preview": "# -*- mode: ruby -*-\n# vi: set ft=ruby :\n\n# Vagrantfile API/syntax version. Don't touch unless you know what you're doin"
},
{
"path": "cloud-config/config/vagrant/ansible.cfg",
"chars": 132,
"preview": "[defaults]\ninventory = ./hosts\nprivate_key_file = ~/.vagrant.d/insecure_private_key\nremote_user = vagrant\nhost_key_check"
},
{
"path": "cloud-config/config/vagrant/hosts",
"chars": 212,
"preview": "[neuron-optimizer-head]\n192.168.61.10\n\n[neuron-optimizer-worker]\n192.168.61.10\n192.168.61.20\n192.168.61.21\n192.168.61.22"
},
{
"path": "cloud-config/config/vagrant/site.yaml",
"chars": 649,
"preview": "---\n\n- name: Install Neuron Optimizer Framework Head\n hosts: neuron-optimizer-head\n sudo: true\n\n vars_files:\n - var"
},
{
"path": "cloud-config/config/vagrant/vars.yaml",
"chars": 954,
"preview": "user_name: neuron\nworkspace: ~/workspace\nvenv: \"{{workspace}}/venv\"\nbuild_dir: \"{{workspace}}/build\"\ninstall_dir: \"{{wor"
},
{
"path": "cloud-config/roles/base/tasks/main.yaml",
"chars": 491,
"preview": "---\n#note: this is for debian and ubuntu distributions\n- name: update apt cache\n apt: update_cache=yes\n\n- name: Install"
},
{
"path": "cloud-config/roles/deap/tasks/main.yaml",
"chars": 1146,
"preview": "---\n\n- name: Upgrade pip\n pip: name=pip version=\"{{ pip_version }}\" virtualenv={{ venv }}\n\n- name: Install numpy\n pip:"
},
{
"path": "cloud-config/roles/granule-example/tasks/main.yaml",
"chars": 330,
"preview": "- name: Get eFEL Source\n git: repo=https://github.com/BlueBrain/eFEL.git dest=~/workspace/eFEL\n\n- name: Compile models\n"
},
{
"path": "cloud-config/roles/neuron/tasks/main.yaml",
"chars": 1814,
"preview": "---\n- include: python27.yaml\n when: \"{{ python27_build }}\"\n\n- set_fact: extra_path=\"{{ pythonbin.stdout + ':' }}\"\n whe"
},
{
"path": "cloud-config/roles/neuron/tasks/python27.yaml",
"chars": 1281,
"preview": "---\n#NOTE: need zlib1g-dev, libssl-dev package on ubuntu/debian\n\n- name: Create directories\n file: path={{ item }} stat"
},
{
"path": "cloud-config/roles/scoop-master/tasks/main.yaml",
"chars": 370,
"preview": "---\n- name: Configure User\n user: name={{ user_name }} generate_ssh_key=yes\n\n- name: Downloading pub key\n fetch: src=/"
},
{
"path": "codecov.yml",
"chars": 137,
"preview": "coverage:\n range: \"90...100\"\n \n status:\n project:\n default:\n target: \"90%\" \n threshold: \"5%\"\n"
},
{
"path": "docs/.gitignore",
"chars": 7,
"preview": "/build\n"
},
{
"path": "docs/Makefile",
"chars": 7410,
"preview": "# Makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS =\nSPHINXBUILD "
},
{
"path": "docs/source/.gitignore",
"chars": 34,
"preview": "/ephys/\n/optimisations/\n/deapext/\n"
},
{
"path": "docs/source/_templates/module.rst",
"chars": 87,
"preview": "{{ fullname }}\n{{ underline }}\n\n.. automodule:: {{ fullname }}\n :members: \n"
},
{
"path": "docs/source/api.rst",
"chars": 310,
"preview": ".. BluePyOpt documentation master file, created by\n sphinx-quickstart on Mon May 11 14:40:15 2015.\n You can adapt th"
}
]
// ... and 163 more files (download for full content)
About this extraction
This page contains the full source code of the BlueBrain/BluePyOpt GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 363 files (30.7 MB), approximately 2.9M tokens, and a symbol index with 1034 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.