Showing preview only (4,907K chars total). Download the full file or copy to clipboard to get everything.
Repository: BrainCog-X/Brain-Cog
Branch: main
Commit: f9b879f75da2
Files: 685
Total size: 4.5 MB
Directory structure:
gitextract_qe2qoke6/
├── .gitignore
├── LICENSE
├── README.md
├── braincog/
│ ├── __init__.py
│ ├── base/
│ │ ├── __init__.py
│ │ ├── brainarea/
│ │ │ ├── BrainArea.py
│ │ │ ├── IPL.py
│ │ │ ├── Insula.py
│ │ │ ├── PFC.py
│ │ │ ├── __init__.py
│ │ │ ├── basalganglia.py
│ │ │ └── dACC.py
│ │ ├── connection/
│ │ │ ├── CustomLinear.py
│ │ │ ├── __init__.py
│ │ │ └── layer.py
│ │ ├── conversion/
│ │ │ ├── __init__.py
│ │ │ ├── convertor.py
│ │ │ ├── merge.py
│ │ │ └── spicalib.py
│ │ ├── encoder/
│ │ │ ├── __init__.py
│ │ │ ├── encoder.py
│ │ │ ├── population_coding.py
│ │ │ └── qs_coding.py
│ │ ├── learningrule/
│ │ │ ├── BCM.py
│ │ │ ├── Hebb.py
│ │ │ ├── RSTDP.py
│ │ │ ├── STDP.py
│ │ │ ├── STP.py
│ │ │ └── __init__.py
│ │ ├── node/
│ │ │ ├── __init__.py
│ │ │ └── node.py
│ │ ├── strategy/
│ │ │ ├── LateralInhibition.py
│ │ │ ├── __init__.py
│ │ │ └── surrogate.py
│ │ └── utils/
│ │ ├── __init__.py
│ │ ├── criterions.py
│ │ └── visualization.py
│ ├── datasets/
│ │ ├── CUB2002011.py
│ │ ├── ESimagenet/
│ │ │ ├── ES_imagenet.py
│ │ │ ├── __init__.py
│ │ │ └── reconstructed_ES_imagenet.py
│ │ ├── NOmniglot/
│ │ │ ├── NOmniglot.py
│ │ │ ├── __init__.py
│ │ │ ├── nomniglot_full.py
│ │ │ ├── nomniglot_nw_ks.py
│ │ │ ├── nomniglot_pair.py
│ │ │ └── utils.py
│ │ ├── StanfordDogs.py
│ │ ├── TinyImageNet.py
│ │ ├── __init__.py
│ │ ├── bullying10k/
│ │ │ ├── __init__.py
│ │ │ └── bullying10k.py
│ │ ├── cut_mix.py
│ │ ├── datasets.py
│ │ ├── gen_input_signal.py
│ │ ├── hmdb_dvs/
│ │ │ ├── __init__.py
│ │ │ └── hmdb_dvs.py
│ │ ├── ncaltech101/
│ │ │ ├── __init__.py
│ │ │ └── ncaltech101.py
│ │ ├── rand_aug.py
│ │ ├── scripts/
│ │ │ ├── testlist01.txt
│ │ │ └── ucf101_dvs_preprocessing.py
│ │ ├── ucf101_dvs/
│ │ │ ├── __init__.py
│ │ │ └── ucf101_dvs.py
│ │ └── utils.py
│ ├── model_zoo/
│ │ ├── NeuEvo/
│ │ │ ├── __init__.py
│ │ │ ├── architect.py
│ │ │ ├── genotypes.py
│ │ │ ├── model.py
│ │ │ ├── model_search.py
│ │ │ ├── operations.py
│ │ │ └── others.py
│ │ ├── __init__.py
│ │ ├── backeinet.py
│ │ ├── base_module.py
│ │ ├── bdmsnn.py
│ │ ├── convnet.py
│ │ ├── fc_snn.py
│ │ ├── glsnn.py
│ │ ├── linearNet.py
│ │ ├── nonlinearNet.py
│ │ ├── qsnn.py
│ │ ├── resnet.py
│ │ ├── resnet19_snn.py
│ │ ├── rsnn.py
│ │ ├── sew_resnet.py
│ │ └── vgg_snn.py
│ └── utils.py
├── docs/
│ ├── Makefile
│ ├── make.bat
│ └── source/
│ ├── conf.py
│ ├── examples/
│ │ ├── Brain_Cognitive_Function_Simulation/
│ │ │ ├── drosophila.md
│ │ │ └── index.rst
│ │ ├── Decision_Making/
│ │ │ ├── BDM_SNN.md
│ │ │ ├── RL.md
│ │ │ └── index.rst
│ │ ├── Knowledge_Representation_and_Reasoning/
│ │ │ ├── CKRGSNN.md
│ │ │ ├── CRSNN.md
│ │ │ ├── SPSNN.md
│ │ │ ├── index.rst
│ │ │ └── musicMemory.md
│ │ ├── Multi-scale_Brain_Structure_Simulation/
│ │ │ ├── Corticothalamic_minicolumn.md
│ │ │ ├── HumanBrain.md
│ │ │ ├── Human_PFC.md
│ │ │ ├── MacaqueBrain.md
│ │ │ ├── index.rst
│ │ │ └── mouse_brain.md
│ │ ├── Perception_and_Learning/
│ │ │ ├── Conversion.md
│ │ │ ├── MultisensoryIntegration.md
│ │ │ ├── QSNN.md
│ │ │ ├── UnsupervisedSTDP.md
│ │ │ ├── img_cls/
│ │ │ │ ├── bp.md
│ │ │ │ ├── glsnn.md
│ │ │ │ └── index.rst
│ │ │ └── index.rst
│ │ ├── Social_Cognition/
│ │ │ ├── Mirror_Test.md
│ │ │ ├── ToM.md
│ │ │ └── index.rst
│ │ └── index.rst
│ ├── index.rst
│ ├── modules.rst
│ └── setup.rst
├── docs.md
├── documents/
│ ├── Data_engine.md
│ ├── Lectures.md
│ ├── Pub_brain_inspired_AI.md
│ ├── Pub_brain_simulation.md
│ ├── Pub_sh_codesign.md
│ ├── Publication.md
│ └── Tutorial.md
├── examples/
│ ├── Brain_Cognitive_Function_Simulation/
│ │ └── drosophila/
│ │ ├── README.md
│ │ └── drosophila.py
│ ├── Embodied_Cognition/
│ │ └── RHI/
│ │ ├── RHI_Test.py
│ │ ├── RHI_Train.py
│ │ └── ReadMe.md
│ ├── Hardware_acceleration/
│ │ ├── README.md
│ │ ├── firefly_v1_schedule_on_pynq.py
│ │ ├── standalone_utils.py
│ │ ├── ultra96_test.py
│ │ └── zcu104_test.py
│ ├── Knowledge_Representation_and_Reasoning/
│ │ ├── CKRGSNN/
│ │ │ ├── README.md
│ │ │ ├── main.py
│ │ │ └── sub_Conceptnet.csv
│ │ ├── CRSNN/
│ │ │ ├── README.md
│ │ │ └── main.py
│ │ ├── SPSNN/
│ │ │ ├── README.md
│ │ │ └── main.py
│ │ └── musicMemory/
│ │ ├── Areas/
│ │ │ ├── apac.py
│ │ │ ├── cortex.py
│ │ │ ├── pac.py
│ │ │ └── pfc.py
│ │ ├── Modal/
│ │ │ ├── PAC.py
│ │ │ ├── cluster.py
│ │ │ ├── composercluster.py
│ │ │ ├── composerlayer.py
│ │ │ ├── composerlifneuron.py
│ │ │ ├── genrecluster.py
│ │ │ ├── genrelayer.py
│ │ │ ├── genrelifneuron.py
│ │ │ ├── izhikevichneuron.py
│ │ │ ├── layer.py
│ │ │ ├── lifneuron.py
│ │ │ ├── note.py
│ │ │ ├── notecluster.py
│ │ │ ├── notelifneuron.py
│ │ │ ├── notesequencelayer.py
│ │ │ ├── pitch.py
│ │ │ ├── sequencelayer.py
│ │ │ ├── sequencememory.py
│ │ │ ├── synapse.py
│ │ │ ├── tempocluster.py
│ │ │ ├── tempolifneuron.py
│ │ │ ├── temposequencelayer.py
│ │ │ ├── titlecluster.py
│ │ │ ├── titlelayer.py
│ │ │ └── titlelifneuron.py
│ │ ├── README.md
│ │ ├── api/
│ │ │ └── music_engine_api.py
│ │ ├── conf/
│ │ │ ├── GenreData.txt
│ │ │ ├── MIDIData.txt
│ │ │ └── conf.py
│ │ ├── inputs/
│ │ │ ├── 1.txt
│ │ │ ├── Data.txt
│ │ │ ├── GenreData.txt
│ │ │ ├── MIDIData.txt
│ │ │ ├── chords.csv
│ │ │ ├── chords.xlsx
│ │ │ ├── information.csv
│ │ │ ├── keyIndex.csv
│ │ │ ├── keys.csv
│ │ │ ├── keys.xlsx
│ │ │ ├── modeindex.csv
│ │ │ ├── modeindex.xlsx
│ │ │ ├── pitch2midi.csv
│ │ │ └── tones2.csv
│ │ ├── result_output/
│ │ │ └── tone learning/
│ │ │ ├── C major_20241121155522.mid
│ │ │ ├── C major_20241122093822.mid
│ │ │ ├── C major_20241122094000.mid
│ │ │ ├── C major_20241122094419.mid
│ │ │ └── C major_20241122094736.mid
│ │ ├── task/
│ │ │ ├── Bach_generated.mid
│ │ │ ├── Classical_generated.mid
│ │ │ ├── Sonate C Major.Mid_recall.mid
│ │ │ ├── melody_generated.mid
│ │ │ ├── mode-conditioned learning.py
│ │ │ ├── musicGeneration.py
│ │ │ └── musicMemory.py
│ │ ├── testData/
│ │ │ ├── Bach/
│ │ │ │ └── prelude C major.mid
│ │ │ ├── JayZhou/
│ │ │ │ └── rainbow.mid
│ │ │ └── Mozart/
│ │ │ └── Sonate C major.mid
│ │ └── tools/
│ │ ├── __init__.py
│ │ ├── generateData.py
│ │ ├── hamonydataset_test.py
│ │ ├── msg.py
│ │ ├── msgq.py
│ │ ├── oscillations.py
│ │ ├── position.txt
│ │ ├── readjson.py
│ │ ├── testSound.py
│ │ ├── testmusic21.py
│ │ ├── testopengl.py
│ │ ├── testwave.py
│ │ └── xmlParser.py
│ ├── MotorControl/
│ │ └── experimental/
│ │ ├── README.md
│ │ ├── brain_area.py
│ │ ├── main.py
│ │ └── model.py
│ ├── Multiscale_Brain_Structure_Simulation/
│ │ ├── CorticothalamicColumn/
│ │ │ ├── README.md
│ │ │ ├── data/
│ │ │ │ ├── __init__.py
│ │ │ │ └── globaldata.py
│ │ │ ├── main.py
│ │ │ ├── model/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── cortex.py
│ │ │ │ ├── cortex_thalamus.py
│ │ │ │ ├── dendrite.py
│ │ │ │ ├── fire.csv
│ │ │ │ ├── layer.py
│ │ │ │ ├── synapse.py
│ │ │ │ └── thalamus.py
│ │ │ └── tools/
│ │ │ ├── __init__.py
│ │ │ ├── cortical.csv
│ │ │ ├── exdata.py
│ │ │ ├── layer.csv
│ │ │ ├── neuron.csv
│ │ │ └── synapse.csv
│ │ ├── Corticothalamic_Brain_Model/
│ │ │ ├── Bioinformatics_propofol_circle.py
│ │ │ ├── Readme.md
│ │ │ └── spectrogram.py
│ │ ├── HumanBrain/
│ │ │ ├── README.md
│ │ │ ├── human_brain.py
│ │ │ └── human_multi.py
│ │ ├── Human_Brain_Model/
│ │ │ ├── NA.py
│ │ │ ├── Readme.md
│ │ │ ├── gc.py
│ │ │ ├── main_246.py
│ │ │ ├── main_84.py
│ │ │ ├── pci.py
│ │ │ ├── pci_246.py
│ │ │ └── spectrogram.py
│ │ ├── Human_PFC_Model/
│ │ │ ├── README.md
│ │ │ └── Six_Layer_PFC.py
│ │ ├── MacaqueBrain/
│ │ │ ├── README.md
│ │ │ └── macaque_brain.py
│ │ └── MouseBrain/
│ │ ├── README.md
│ │ └── mouse_brain.py
│ ├── Perception_and_Learning/
│ │ ├── Conversion/
│ │ │ ├── burst_conversion/
│ │ │ │ ├── CIFAR10_VGG16.py
│ │ │ │ ├── README.md
│ │ │ │ └── converted_CIFAR10.py
│ │ │ └── msat_conversion/
│ │ │ ├── CIFAR10_VGG16.py
│ │ │ ├── README.md
│ │ │ ├── converted_CIFAR10.py
│ │ │ └── convertor.py
│ │ ├── IllusionPerception/
│ │ │ └── AbuttingGratingIllusion/
│ │ │ ├── distortion/
│ │ │ │ ├── __init__.py
│ │ │ │ └── abutting_grating_illusion/
│ │ │ │ ├── __init__.py
│ │ │ │ └── abutting_grating_distortion.py
│ │ │ └── main.py
│ │ ├── MultisensoryIntegration/
│ │ │ ├── README.md
│ │ │ └── code/
│ │ │ ├── MultisensoryIntegrationDEMO_AM.py
│ │ │ ├── MultisensoryIntegrationDEMO_IM.py
│ │ │ └── measure_and_visualization.py
│ │ ├── NeuEvo/
│ │ │ ├── auto_augment.py
│ │ │ ├── main.py
│ │ │ ├── separate_loss.py
│ │ │ ├── train.py
│ │ │ ├── train_search.py
│ │ │ └── utils.py
│ │ ├── QSNN/
│ │ │ ├── README.md
│ │ │ └── main.py
│ │ ├── UnsupervisedSTDP/
│ │ │ ├── Readme.md
│ │ │ └── codef.py
│ │ └── img_cls/
│ │ ├── bp/
│ │ │ ├── README.md
│ │ │ ├── main.py
│ │ │ ├── main_backei.py
│ │ │ └── main_simplified.py
│ │ ├── glsnn/
│ │ │ ├── README.md
│ │ │ └── cls_glsnn.py
│ │ ├── spiking_capsnet/
│ │ │ ├── README.md
│ │ │ └── spikingcaps.py
│ │ └── transfer_for_dvs/
│ │ ├── GradCAM_visualization.py
│ │ ├── README.md
│ │ ├── datasets.py
│ │ ├── main.py
│ │ ├── main_transfer.py
│ │ └── main_visual_losslandscape.py
│ ├── Snn_safety/
│ │ ├── DPSNN/
│ │ │ ├── Readme.txt
│ │ │ ├── load_data.py
│ │ │ ├── main_dpsnn.py
│ │ │ └── model.py
│ │ └── RandHet-SNN/
│ │ ├── README.md
│ │ ├── evaluate.py
│ │ ├── my_node.py
│ │ ├── sew_resnet.py
│ │ ├── train.py
│ │ └── utils.py
│ ├── Social_Cognition/
│ │ ├── FOToM/
│ │ │ ├── algorithms/
│ │ │ │ ├── ToM_class.py
│ │ │ │ ├── __init__.py
│ │ │ │ ├── maddpg.py
│ │ │ │ └── tom11.py
│ │ │ ├── common/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── distributions.py
│ │ │ │ ├── tile_images.py
│ │ │ │ └── vec_env/
│ │ │ │ ├── __init__.py
│ │ │ │ └── vec_env.py
│ │ │ ├── evaluate.py
│ │ │ ├── main.py
│ │ │ ├── multiagent/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── core.py
│ │ │ │ ├── environment.py
│ │ │ │ ├── multi_discrete.py
│ │ │ │ ├── policy.py
│ │ │ │ ├── rendering.py
│ │ │ │ ├── scenario.py
│ │ │ │ └── scenarios/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── hetero_spread.py
│ │ │ │ ├── simple.py
│ │ │ │ ├── simple_adversary.py
│ │ │ │ ├── simple_crypto.py
│ │ │ │ ├── simple_push.py
│ │ │ │ ├── simple_reference.py
│ │ │ │ ├── simple_speaker_listener.py
│ │ │ │ ├── simple_spread.py
│ │ │ │ ├── simple_tag.py
│ │ │ │ └── simple_world_comm.py
│ │ │ ├── readme.md
│ │ │ └── utils/
│ │ │ ├── __init__.py
│ │ │ ├── agents.py
│ │ │ ├── buffer.py
│ │ │ ├── env_wrappers.py
│ │ │ ├── make_env.py
│ │ │ ├── misc.py
│ │ │ ├── multiprocessing.py
│ │ │ ├── networks.py
│ │ │ └── noise.py
│ │ ├── Intention_Prediction/
│ │ │ └── Intention_Prediction.py
│ │ ├── MAToM-SNN/
│ │ │ ├── LICENSE
│ │ │ ├── MPE/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── agents/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── agents.py
│ │ │ │ ├── common/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── distributions.py
│ │ │ │ │ ├── tile_images.py
│ │ │ │ │ └── vec_env/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── vec_env.py
│ │ │ │ ├── main.py
│ │ │ │ ├── multiagent/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── scenarios/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── simple.py
│ │ │ │ │ ├── simple_crypto.py
│ │ │ │ │ ├── simple_push.py
│ │ │ │ │ ├── simple_reference.py
│ │ │ │ │ ├── simple_speaker_listener.py
│ │ │ │ │ ├── simple_spread.py
│ │ │ │ │ └── simple_world_comm.py
│ │ │ │ ├── policy/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── maddpg.py
│ │ │ │ └── utils/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── buffer.py
│ │ │ │ ├── env_wrappers.py
│ │ │ │ ├── make_env.py
│ │ │ │ ├── misc.py
│ │ │ │ ├── multiprocessing.py
│ │ │ │ ├── networks.py
│ │ │ │ └── noise.py
│ │ │ ├── README.md
│ │ │ └── STAG/
│ │ │ ├── agents/
│ │ │ │ ├── __init__.py
│ │ │ │ └── sagent.py
│ │ │ ├── common_sr/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── arguments.py
│ │ │ │ ├── dummy_vec_env.py
│ │ │ │ ├── multiprocessing_env.py
│ │ │ │ ├── replay_buffer.py
│ │ │ │ └── srollout.py
│ │ │ ├── envs/
│ │ │ │ ├── Stag_Hunt_env.py
│ │ │ │ ├── __init__.py
│ │ │ │ ├── abstract.py
│ │ │ │ └── constants.py
│ │ │ ├── main_spiking.py
│ │ │ ├── network/
│ │ │ │ ├── __init__.py
│ │ │ │ └── spiking_net.py
│ │ │ ├── policy/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── dqn.py
│ │ │ │ ├── stomvdn.py
│ │ │ │ └── svdn.py
│ │ │ ├── preprocessoing/
│ │ │ │ ├── __init__.py
│ │ │ │ └── common.py
│ │ │ └── runner.py
│ │ ├── ReadMe.md
│ │ ├── SmashVat/
│ │ │ ├── dqn.py
│ │ │ ├── environment.py
│ │ │ ├── main.py
│ │ │ ├── manual_control.py
│ │ │ ├── qnets.py
│ │ │ ├── side_effect_eval.py
│ │ │ └── window.py
│ │ ├── ToCM/
│ │ │ ├── README.md
│ │ │ ├── agent/
│ │ │ │ ├── controllers/
│ │ │ │ │ └── ToCMController.py
│ │ │ │ ├── learners/
│ │ │ │ │ └── ToCMLearner.py
│ │ │ │ ├── memory/
│ │ │ │ │ └── ToCMMemory.py
│ │ │ │ ├── models/
│ │ │ │ │ └── ToCMModel.py
│ │ │ │ ├── optim/
│ │ │ │ │ ├── loss.py
│ │ │ │ │ └── utils.py
│ │ │ │ ├── runners/
│ │ │ │ │ └── ToCMRunner.py
│ │ │ │ ├── utils/
│ │ │ │ │ └── params.py
│ │ │ │ └── workers/
│ │ │ │ └── ToCMWorker.py
│ │ │ ├── configs/
│ │ │ │ ├── Config.py
│ │ │ │ ├── EnvConfigs.py
│ │ │ │ ├── Experiment.py
│ │ │ │ ├── ToCM/
│ │ │ │ │ ├── ToCMAgentConfig.py
│ │ │ │ │ ├── ToCMControllerConfig.py
│ │ │ │ │ ├── ToCMLearnerConfig.py
│ │ │ │ │ └── optimal/
│ │ │ │ │ └── starcraft/
│ │ │ │ │ ├── AgentConfig.py
│ │ │ │ │ └── LearnerConfig.py
│ │ │ │ └── __init__.py
│ │ │ ├── env/
│ │ │ │ ├── mpe/
│ │ │ │ │ └── MPE.py
│ │ │ │ └── starcraft/
│ │ │ │ └── StarCraft.py
│ │ │ ├── environments.py
│ │ │ ├── mpe/
│ │ │ │ ├── MPE_Env.py
│ │ │ │ ├── __init__.py
│ │ │ │ ├── core.py
│ │ │ │ ├── environment.py
│ │ │ │ ├── multi_discrete.py
│ │ │ │ ├── rendering.py
│ │ │ │ ├── scenario.py
│ │ │ │ └── scenarios/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── hetero_spread.py
│ │ │ │ ├── simple_adversary.py
│ │ │ │ ├── simple_crypto.py
│ │ │ │ ├── simple_crypto_display.py
│ │ │ │ ├── simple_push.py
│ │ │ │ ├── simple_reference.py
│ │ │ │ ├── simple_speaker_listener.py
│ │ │ │ ├── simple_spread.py
│ │ │ │ ├── simple_tag.py
│ │ │ │ └── simple_world_comm.py
│ │ │ ├── networks/
│ │ │ │ ├── ToCM/
│ │ │ │ │ ├── action.py
│ │ │ │ │ ├── critic.py
│ │ │ │ │ ├── dense.py
│ │ │ │ │ ├── rnns.py
│ │ │ │ │ ├── utils.py
│ │ │ │ │ └── vae.py
│ │ │ │ └── transformer/
│ │ │ │ └── layers.py
│ │ │ ├── requirements.txt
│ │ │ ├── run.sh
│ │ │ ├── smac/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── bin/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── map_list.py
│ │ │ │ ├── env/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── multiagentenv.py
│ │ │ │ │ ├── pettingzoo/
│ │ │ │ │ │ ├── StarCraft2PZEnv.py
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ └── test/
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ ├── all_test.py
│ │ │ │ │ │ └── smac_pettingzoo_test.py
│ │ │ │ │ └── starcraft2/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── maps/
│ │ │ │ │ │ ├── SMAC_Maps/
│ │ │ │ │ │ │ └── 2s_vs_1sc.SC2Map
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ └── smac_maps.py
│ │ │ │ │ ├── render.py
│ │ │ │ │ └── starcraft2.py
│ │ │ │ └── examples/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── pettingzoo/
│ │ │ │ │ ├── README.rst
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── pettingzoo_demo.py
│ │ │ │ ├── random_agents.py
│ │ │ │ └── rllib/
│ │ │ │ ├── README.rst
│ │ │ │ ├── __init__.py
│ │ │ │ ├── env.py
│ │ │ │ ├── model.py
│ │ │ │ ├── run_ppo.py
│ │ │ │ └── run_qmix.py
│ │ │ ├── train.py
│ │ │ └── utils/
│ │ │ ├── __init__.py
│ │ │ ├── mlp_buffer.py
│ │ │ ├── mlp_nstep_buffer.py
│ │ │ ├── popart.py
│ │ │ ├── rec_buffer.py
│ │ │ ├── segment_tree.py
│ │ │ └── util.py
│ │ ├── ToM/
│ │ │ ├── BrainArea/
│ │ │ │ ├── PFC_ToM.py
│ │ │ │ ├── TPJ.py
│ │ │ │ ├── __init__.py
│ │ │ │ ├── dACC.py
│ │ │ │ ├── one_hot.py
│ │ │ │ └── test.py
│ │ │ ├── README.md
│ │ │ ├── __init__.py
│ │ │ ├── data/
│ │ │ │ ├── NPC_assessment.csv
│ │ │ │ ├── agent_assessment.csv
│ │ │ │ ├── injury_memory.txt
│ │ │ │ ├── injury_value.txt
│ │ │ │ └── one_hot.py
│ │ │ ├── env/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── env.py
│ │ │ │ ├── env3_train_env00.py
│ │ │ │ └── env3_train_env01.py
│ │ │ ├── main_ToM.py
│ │ │ ├── main_both.py
│ │ │ ├── rulebasedpolicy/
│ │ │ │ ├── Find_a_way.py
│ │ │ │ ├── __init__.py
│ │ │ │ ├── a_star.py
│ │ │ │ ├── load_statedata.py
│ │ │ │ ├── point.py
│ │ │ │ ├── random_map.py
│ │ │ │ ├── statedata_pre.py
│ │ │ │ ├── train.txt
│ │ │ │ └── world_model.py
│ │ │ └── utils/
│ │ │ ├── Encoder.py
│ │ │ └── one_hot.py
│ │ ├── affective_empathy/
│ │ │ ├── BAE-SNN/
│ │ │ │ ├── BAESNN.py
│ │ │ │ ├── README.md
│ │ │ │ ├── env_poly.py
│ │ │ │ └── env_two_poly.py
│ │ │ ├── BEEAD-SNN/
│ │ │ │ ├── BEEAD-SNN.py
│ │ │ │ ├── README.md
│ │ │ │ ├── RL_Brain.py
│ │ │ │ ├── env.py
│ │ │ │ ├── env_poly_SNN.py
│ │ │ │ ├── rsnn.py
│ │ │ │ ├── sd_env.py
│ │ │ │ └── snowdrift_main.py
│ │ │ └── BRP-SNN/
│ │ │ ├── BRP-SNN.py
│ │ │ ├── README.md
│ │ │ ├── env_poly_SNN.py
│ │ │ └── env_two_poly_SNN.py
│ │ └── mirror_test/
│ │ ├── README.md
│ │ └── mirror_test.py
│ ├── Spiking-Transformers/
│ │ ├── LIFNode.py
│ │ ├── README.md
│ │ ├── datasets.py
│ │ ├── main.py
│ │ └── models/
│ │ ├── spike_driven_transformer.py
│ │ ├── spike_driven_transformer_dvs.py
│ │ ├── spike_driven_transformer_v2.py
│ │ ├── spike_driven_transformer_v2_dvs.py
│ │ ├── spikformer.py
│ │ └── spikformer_dvs.py
│ ├── Structural_Development/
│ │ ├── DPAP/
│ │ │ ├── README.md
│ │ │ ├── mask_model.py
│ │ │ ├── prun_main.py
│ │ │ └── utils.py
│ │ ├── DSD-SNN/
│ │ │ ├── README.md
│ │ │ └── cifar100/
│ │ │ ├── available.py
│ │ │ ├── main_simplified.py
│ │ │ ├── manipulate.py
│ │ │ ├── maskcl2.py
│ │ │ └── vgg_snn.py
│ │ ├── ELSM/
│ │ │ ├── evolve.py
│ │ │ ├── lsm.py
│ │ │ ├── model.py
│ │ │ ├── nsganet.py
│ │ │ └── spikes.py
│ │ ├── SCA-SNN/
│ │ │ ├── README.md
│ │ │ ├── configs/
│ │ │ │ └── train.yaml
│ │ │ ├── inclearn/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── convnet/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── classifier.py
│ │ │ │ │ ├── imbalance.py
│ │ │ │ │ ├── maskcl2.py
│ │ │ │ │ ├── network.py
│ │ │ │ │ ├── resnet.py
│ │ │ │ │ ├── sew_resnet.py
│ │ │ │ │ └── utils.py
│ │ │ │ ├── datasets/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── data.py
│ │ │ │ │ └── dataset.py
│ │ │ │ ├── models/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── base.py
│ │ │ │ │ └── incmodel.py
│ │ │ │ └── tools/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── autoaugment_extra.py
│ │ │ │ ├── cutout.py
│ │ │ │ ├── data_utils.py
│ │ │ │ ├── factory.py
│ │ │ │ ├── memory.py
│ │ │ │ ├── metrics.py
│ │ │ │ ├── results_utils.py
│ │ │ │ ├── scheduler.py
│ │ │ │ ├── similar.py
│ │ │ │ └── utils.py
│ │ │ └── main.py
│ │ └── SD-SNN/
│ │ ├── README.md
│ │ ├── main.py
│ │ ├── prun_and_generation.py
│ │ ├── snn_model.py
│ │ └── utils.py
│ ├── Structure_Evolution/
│ │ ├── Adaptive_lsm/
│ │ │ ├── BrainCog-Version/
│ │ │ │ ├── README.md
│ │ │ │ ├── brid.py
│ │ │ │ ├── lsmmodel.py
│ │ │ │ ├── maze.py
│ │ │ │ └── tools/
│ │ │ │ ├── EnuGlobalNetwork.py
│ │ │ │ ├── ExperimentEnvGlobalNetworkSurvival.py
│ │ │ │ ├── MazeTurnEnvVec.py
│ │ │ │ └── nsganet.py
│ │ │ └── raw/
│ │ │ ├── BCM.py
│ │ │ ├── README.md
│ │ │ ├── lstm.py
│ │ │ ├── main.py
│ │ │ ├── pltbcm.py
│ │ │ ├── pltrank.py
│ │ │ ├── q_l.py
│ │ │ └── tools/
│ │ │ ├── EnuGlobalNetwork.py
│ │ │ ├── ExperimentEnvGlobalNetworkSurvival.py
│ │ │ └── MazeTurnEnvVec.py
│ │ ├── EB-NAS/
│ │ │ ├── acc_predictor/
│ │ │ │ ├── adaptive_switching.py
│ │ │ │ ├── carts.py
│ │ │ │ ├── factory.py
│ │ │ │ ├── gp.py
│ │ │ │ ├── mlp.py
│ │ │ │ └── rbf.py
│ │ │ ├── cellmodel.py
│ │ │ ├── ebnas.py
│ │ │ ├── micro_encoding.py
│ │ │ ├── motifs.py
│ │ │ ├── nsganet.py
│ │ │ ├── operations.py
│ │ │ ├── readme.md
│ │ │ ├── single_genome.py
│ │ │ └── tm.py
│ │ ├── ELSM/
│ │ │ ├── README.md
│ │ │ ├── evolve.py
│ │ │ ├── lsm.py
│ │ │ ├── model.py
│ │ │ ├── nsganet.py
│ │ │ └── spikes.py
│ │ └── MSE-NAS/
│ │ ├── auto_augment.py
│ │ ├── cellmodel.py
│ │ ├── evolution.py
│ │ ├── loss_f.py
│ │ ├── micro_encoding.py
│ │ ├── motifs.py
│ │ ├── nsganet.py
│ │ ├── obj.py
│ │ ├── operations.py
│ │ ├── readme.md
│ │ ├── tm.py
│ │ └── utils.py
│ ├── TIM/
│ │ ├── README.md
│ │ ├── main.py
│ │ ├── models/
│ │ │ ├── TIM.py
│ │ │ ├── spikformer_braincog_DVS.py
│ │ │ └── spikformer_braincog_SHD.py
│ │ └── utils/
│ │ ├── MyGrad.py
│ │ ├── MyNode.py
│ │ └── datasets.py
│ └── decision_making/
│ ├── BDM-SNN/
│ │ ├── BDM-SNN-UAV.py
│ │ ├── BDM-SNN-hh.py
│ │ ├── BDM-SNN.py
│ │ ├── README.md
│ │ └── decisionmaking.py
│ ├── RL/
│ │ ├── README.md
│ │ ├── atari/
│ │ │ ├── __init__.py
│ │ │ └── atari_wrapper.py
│ │ ├── mcs-fqf/
│ │ │ ├── discrete.py
│ │ │ ├── main.py
│ │ │ ├── network.py
│ │ │ └── policy.py
│ │ ├── requirements.txt
│ │ ├── sdqn/
│ │ │ ├── main.py
│ │ │ └── network.py
│ │ └── utils/
│ │ ├── __init__.py
│ │ └── normalization.py
│ └── swarm/
│ ├── Collision-Avoidance.py
│ └── README.md
├── requirements.txt
└── setup.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
.idea
*.egg-info/
eggs/
.eggs/
*.exe
*.pyc
/.vscode/
*.code-workspace
__pycache__
# Sphinx documentation
docs/_build/
docs/build/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# event data
*.bin
*.dat
*.pt
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: README.md
================================================
# BrainCog
---
BrainCog is an open source spiking neural network based brain-inspired
cognitive intelligence engine for Brain-inspired Artificial Intelligence, Brain-inspired Embodied AI, and brain simulation. More information on BrainCog can be found on its homepage http://www.brain-cog.network/
The current version of BrainCog contains at least 50 functional spiking neural network algorithms (including but not limited to perception and learning, decision making, knowledge representation and reasoning, motor control, social cognition, etc.) built on BrainCog infrastructures, and BrainCog also provides spiking neural network based brain simulations of drosophila, rodent, monkey, and human brains at multiple scales. More details at http://www.brain-cog.network/docs/
BrainCog is a community-based effort for spiking neural network based artificial intelligence, and we welcome all forms of contribution, from contributing to the development of core components to contributing applications.
<img src="http://braincog.ai/static_index/image/github_readme/logo.jpg" alt="./figures/logo.jpg" width="70%" />
BrainCog provides essential and fundamental components to model biological and artificial intelligence.

Our paper is published in [Patterns](https://www.cell.com/patterns/fulltext/S2666-3899(23)00144-7?_returnURL=https%3A%2F%2Flinkinghub.elsevier.com%2Fretrieve%2Fpii%2FS2666389923001447%3Fshowall%3Dtrue). If you use BrainCog in your research, the following paper can be cited as the source for BrainCog.
```bib
@article{Zeng2023,
doi = {10.1016/j.patter.2023.100789},
url = {https://doi.org/10.1016/j.patter.2023.100789},
year = {2023},
month = jul,
publisher = {Cell Press},
pages = {100789},
author = {Yi Zeng and Dongcheng Zhao and Feifei Zhao and Guobin Shen and Yiting Dong and Enmeng Lu and Qian Zhang and Yinqian Sun and Qian Liang and Yuxuan Zhao and Zhuoya Zhao and Hongjian Fang and Yuwei Wang and Yang Li and Xin Liu and Chengcheng Du and Qingqun Kong and Zizhe Ruan and Weida Bi},
title = {{BrainCog}: A spiking neural network based, brain-inspired cognitive intelligence engine for brain-inspired {AI} and brain simulation},
journal = {Patterns}
}
```
## Brain-Inspired AI
BrainCog currently provides cognitive functions components that can be classified
into five categories:
* Perception and Learning
* Knowledge Representation and Reasoning
* Decision Making
* Motor Control
* Social Cognition
* Development and Evolution
* Safety and Security
<img src="https://raw.githubusercontent.com/Brain-Cog-Lab/Brain-Cog/main/figures/mirror-test.gif" alt="mt" width="55%" />
<img src="https://raw.githubusercontent.com/Brain-Cog-Lab/Brain-Cog/main/figures/joy.gif" alt="mt" width="55%" />
## Brain Simulation
BrainCog currently includes two parts for brain simulation:
* Brain Cognitive Function Simulation
* Multi-scale Brain Structure Simulation
<img src="https://raw.githubusercontent.com/Brain-Cog-Lab/Brain-Cog/main/figures/braincog-mouse-brain-model-10s.gif" alt="bmbm10s" width="55%" />
<img src="https://raw.githubusercontent.com/Brain-Cog-Lab/Brain-Cog/main/figures/braincog-macaque-10s.gif" alt="bm10s" width="55%" />
<img src="https://raw.githubusercontent.com/Brain-Cog-Lab/Brain-Cog/main/figures/braincog-humanbrain-10s.gif" alt="bh10s" width="55%" />
The anatomical and imaging data is used to support our simulation from various aspects.
## Software-Hardware Codesign (BrainCog Firefly)
<img src="http://www.brain-cog.network/static/image/github_readme/firefly_logo.jpg" alt="bh10s" width="25%" />
BrainCog currently provides `hardware acceleration` for spiking neural network based brain-inspired AI.
<img src="http://braincog.ai/static_index/image/github_readme/firefly.jpg" alt="bh10s" width="55%" />
The following papers are most recent advancement of BrainCog Firefly series for Software-Hardware Codesign for Brain-inspired AI.
* Tenglong Li, Jindong Li, Guobin Shen, Dongcheng Zhao, Qian Zhang, Yi Zeng. FireFly-S: Exploiting Dual-Side Sparsity for Spiking Neural Networks Acceleration With Reconfigurable Spatial Architecture. IEEE Transactions on Circuits and Systems I (TCAS-I), 2024.(https://doi.org/10.1109/TCSI.2024.3496554)
* Jindong Li, Guobin Shen, Dongcheng Zhao, Qian Zhang, Yi Zeng. Firefly v2: Advancing hardware support for high-performance spiking neural network with a spatiotemporal fpga accelerator. IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 2024. (https://ieeexplore.ieee.org/abstract/document/10478105/)
* Jindong Li, Guobin Shen, Dongcheng Zhao, Qian Zhang, Yi Zeng. FireFly: A High-Throughput Hardware Accelerator for Spiking Neural Networks With Efficient DSP and Memory Optimization. IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 2023. (https://ieeexplore.ieee.org/document/10143752)
## Embodied AI and Robotics (BrainCog Embot)
<img src="http://www.brain-cog.network/static/image/github_readme/Embot_logo/%E5%B8%A6%E4%B8%8A%E6%A0%87%E9%A2%98-Embot%20logo%20%E9%80%8F%E6%98%8E%E8%83%8C%E6%99%AF.png" alt="bh10s" width="25%" />
<img src="https://raw.githubusercontent.com/Brain-Cog-Lab/Brain-Cog/main/figures/PushT.gif" alt="bm10s" width="10%" /><img src="https://raw.githubusercontent.com/Brain-Cog-Lab/Brain-Cog/main/figures/Can.gif" alt="bh10s" width="10%" /> <img src="https://raw.githubusercontent.com/Brain-Cog-Lab/Brain-Cog/main/figures/左数第三个.gif" alt="bh10s" width="10%" /> <img src="https://raw.githubusercontent.com/Brain-Cog-Lab/Brain-Cog/main/figures/Square.gif" alt="bm10s" width="10%" /><img src="https://raw.githubusercontent.com/Brain-Cog-Lab/Brain-Cog/main/figures/ToolHang.gif" alt="bh10s" width="10%" /> <img src="https://raw.githubusercontent.com/Brain-Cog-Lab/Brain-Cog/main/figures/左数第六个.gif" alt="bh10s" width="10%" />
BrainCog Embot is an Embodied AI platform under the Brain-inspired Cognitive Intelligence Engine (BrainCog) framework, which is an open-source Brain-inspired AI platform based on Spiking Neural Network.
The following papers are most recent advancement of BrainCog Embot:
* Qianhao Wang, Yinqian Sun, Enmeng Lu, Qian Zhang, Yi Zeng. Brain-Inspired Action Generation with Spiking Transformer Diffusion Policy Model. Advances in Brain Inspired Cognitive Systems (BICS), 2024.(https://link.springer.com/chapter/10.1007/978-981-96-2882-7_23)
* Yinqian Sun, Feifei Zhao, Mingyang Lv, Yi Zeng. Implementing Spiking World Model with Multi-Compartment Neurons for Model-based Reinforcement Learning, 2025. (https://arxiv.org/abs/2503.00713)
* Qianhao Wang, Yinqian Sun, Enmeng Lu, Qian Zhang, Yi Zeng. MTDP: Modulated Transformer Diffusion Policy Model, 2025. (https://arxiv.org/abs/2502.09029)
## Resources
### [[Lectures]](https://github.com/BrainCog-X/Brain-Cog/blob/main/documents/Lectures.md) | [[Tutorial]](https://github.com/BrainCog-X/Brain-Cog/blob/main/documents/Tutorial.md)
## Publications using BrainCog
### [[Brain Inspired AI]](https://github.com/BrainCog-X/Brain-Cog/blob/main/documents/Publication.md) | [[Brain Simulation]](https://github.com/BrainCog-X/Brain-Cog/blob/main/documents/Pub_brain_simulation.md) | [[Software-Hardware Co-design]](https://github.com/BrainCog-X/Brain-Cog/blob/main/documents/Pub_sh_codesign.md)
## BrainCog Data Engine
### [BrainCog Data Engine](https://github.com/BrainCog-X/Brain-Cog/blob/main/documents/Data_engine.md)
## Requirements:
* numpy
* scipy
* h5py
* torch
* torchvision
* torchaudio
* timm == 0.6.13
* scikit-learn
* einops
* thop
* pyyaml
* matplotlib
* seaborn
* pygame
* dv
* tensorboard
* tonic
## Install
### Install Online
1. You can install braincog by running:
> `pip install braincog`
2. Also, install from github by running:
> `pip install git+https://github.com/braincog-X/Brain-Cog.git`
### Install locally
1. If you are a developer, it is recommended to download or clone
braincog from github.
> `git clone https://github.com/braincog-X/Brain-Cog.git`
2. Enter the folder of braincog
> `cd Brain-Cog`
3. Install braincog locally
> `pip install -e .`
## Example
1. Examples for Image Classification
```shell
cd ./examples/Perception_and_Learning/img_cls/bp
python main.py --model cifar_convnet --dataset cifar10 --node-type LIFNode --step 8 --device 0
```
2. Examples for Event Classification
```shell
cd ./examples/Perception_and_Learning/img_cls/bp
python main.py --model dvs_convnet --node-type LIFNode --dataset dvsc10 --step 10 --batch-size 128 --act-fun QGateGrad --device 0
```
Other BrainCog features and tutorials can be found at http://www.brain-cog.network/docs/
## BrainCog Assistant
Please add our BrainCog Assistant via WeChat and we will invite you to our WeChat developer group.

## Maintenance
This project is led by
**1.Brain-inspired Cognitive Intelligence Lab, Institute of Automation, Chinese Academy of Sciences http://www.braincog.ai/**
**2.Center for Long-term Artificial Intelligence (CLAI) http://long-term-ai.center/**
================================================
FILE: braincog/__init__.py
================================================
# __all__ = ['base', 'datasets', 'model_zoo', 'utils']
#
# from . import (
# base,
# datasets,
# model_zoo,
# utils
# )
================================================
FILE: braincog/base/__init__.py
================================================
# 'strategy' is imported below but was missing from __all__, which hid it
# from `from braincog.base import *`.
__all__ = ['node', 'connection', 'learningrule', 'brainarea', 'encoder', 'utils', 'conversion', 'strategy']
from . import (
    node,
    strategy,
    connection,
    conversion,
    learningrule,
    brainarea,
    utils,
    encoder
)
================================================
FILE: braincog/base/brainarea/BrainArea.py
================================================
import numpy as np
import torch, os, sys
from torch import nn
from torch.nn import Parameter
import abc
import math
from abc import ABC
import numpy as np
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from braincog.base.node.node import *
from braincog.base.learningrule.STDP import *
from braincog.base.connection.CustomLinear import *
class BrainArea(nn.Module, abc.ABC):
    """
    Abstract base class for brain-area modules.

    Subclasses must implement ``__init__`` and ``forward``; ``reset``
    clears internal state and is a no-op in the base class.
    """

    @abc.abstractmethod
    def __init__(self):
        """Initialize the underlying ``nn.Module``."""
        super().__init__()

    @abc.abstractmethod
    def forward(self, x):
        """
        One forward step.

        :param x: input spikes
        :return: the spikes ``x``
        """
        return x

    def reset(self):
        """Reset internal state (none in the base class)."""
        return None
class ThreePointForward(BrainArea):
    """Feed-forward brain area: three IF populations chained by STDP links."""

    def __init__(self, w1, w2, w3):
        """
        :param w1: weights of the first link
        :param w2: weights of the second link
        :param w3: weights of the third link
        """
        super().__init__()
        weights = (w1, w2, w3)
        self.node = [IFNode() for _ in weights]
        self.connection = [CustomLinear(w) for w in weights]
        self.stdp = [STDP(n, c) for n, c in zip(self.node, self.connection)]

    def forward(self, x):
        """
        Propagate spikes through the three stages in order.

        :param x: input spikes
        :return: output spikes and the concatenated weight updates of all stages
        """
        deltas = []
        for rule in self.stdp:
            x, dw = rule(x)
            deltas.extend(dw)
        return x, tuple(deltas)
class Feedback(BrainArea):
    """Two-population area with a feedback loop from node 1 back to node 0."""

    def __init__(self, w1, w2, w3):
        """
        :param w1: input-link weights
        :param w2: forward-link weights
        :param w3: feedback-link weights
        """
        super().__init__()
        self.node = [IFNode(), IFNode()]
        self.connection = [CustomLinear(w1), CustomLinear(w2), CustomLinear(w3)]
        self.stdp = [
            MutliInputSTDP(self.node[0], [self.connection[0], self.connection[2]]),
            STDP(self.node[1], self.connection[1]),
        ]
        # Output spikes of the second population from the previous step,
        # fed back through w3 on the next call.
        self.x1 = torch.zeros(1, w3.shape[0])

    def forward(self, x):
        """
        One step: combine input with last feedback, then refresh the feedback.

        :param x: input spikes
        :return: output spikes of the second population and all weight updates
        """
        hidden, dw_in = self.stdp[0](x, self.x1)
        self.x1, dw_out = self.stdp[1](hidden)
        return self.x1, (*dw_in, *dw_out)

    def reset(self):
        """Clear the stored feedback spikes."""
        self.x1 *= 0
class TwoInOneOut(BrainArea):
    """Single IF population driven by two independent input streams."""

    def __init__(self, w1, w2):
        """
        :param w1: weights of the first input link
        :param w2: weights of the second input link
        """
        super().__init__()
        self.node = [IFNode()]
        self.connection = [CustomLinear(w1), CustomLinear(w2)]
        self.stdp = [MutliInputSTDP(self.node[0],
                                    [self.connection[0], self.connection[1]])]

    def forward(self, x1, x2):
        """
        Merge both inputs into the shared population.

        :param x1: first input spikes
        :param x2: second input spikes
        :return: output spikes and the weight updates
        """
        out, dw = self.stdp[0](x1, x2)
        return out, dw
class SelfConnectionArea(BrainArea):
    """IF population with a recurrent (self) connection."""

    def __init__(self, w1, w2):
        """
        :param w1: input-link weights
        :param w2: recurrent-link weights
        """
        super().__init__()
        self.node = [IFNode()]
        self.connection = [CustomLinear(w1), CustomLinear(w2)]
        self.stdp = [MutliInputSTDP(self.node[0],
                                    [self.connection[0], self.connection[1]])]
        # Output spikes of the previous step, fed back through w2.
        self.x1 = torch.zeros(1, w2.shape[0])

    def forward(self, x):
        """
        One recurrent step driven by the input plus the previous output.

        :param x: input spikes
        :return: output spikes and the weight updates
        """
        self.x1, dw = self.stdp[0](x, self.x1)
        return self.x1, dw

    def reset(self):
        """Clear the stored recurrent spikes."""
        self.x1 *= 0
if __name__ == "__main__":
    # Smoke test: drive a TwoInOneOut area with constant input for T steps.
    T = 20
    w1 = torch.tensor([[1., 1], [1, 1]])
    w2 = torch.tensor([[1., 1], [1, 1]])
    w3 = torch.tensor([[0.4, 0.4], [0.4, 0.4]])  # unused here; kept for reference
    area = TwoInOneOut(w1, w2)
    for _ in range(T):
        out = area(torch.tensor([[0.1, 0.1]]), torch.tensor([[0.1, 0.1]]))
        print(out[0])
================================================
FILE: braincog/base/brainarea/IPL.py
================================================
from braincog.base.learningrule.STDP import *
from braincog.base.node.node import *
from braincog.base.connection.CustomLinear import *
import random
import numpy as np
import torch
import os
import sys
from torch import nn
from torch.nn import Parameter
import abc
import math
from abc import ABC
import numpy as np
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
import matplotlib.pyplot as plt
from braincog.base.strategy.surrogate import *
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
class IPLNet(nn.Module):
    """
    Inferior parietal lobule (IPL) network.

    Two Izhikevich populations: IPLm (driven by vPMC input) and IPLv (driven
    by STS input plus the IPLm output). Plasticity (STDP) is applied on the
    STS->IPLv and IPLm->IPLv links; the vPMC->IPLm link is applied directly.
    """

    def __init__(self, connection):
        """
        Set up the network structure of IPL.

        :param connection: list of three CustomLinear links:
            [0] vPMC -> IPLm, [1] STS -> IPLv, [2] IPLm -> IPLv
        """
        super().__init__()
        # Two sub-populations: IPLM, IPLV.
        self.num_subMB = 2
        self.node = [IzhNodeMU(threshold=30., a=0.02, b=0.2, c=-65., d=6., mem=-70.) for i in range(self.num_subMB)]
        self.connection = connection
        self.learning_rule = []
        self.learning_rule.append(STDP(self.node[0], self.connection[0]))  # vPMC_input-IPLM
        self.learning_rule.append(MutliInputSTDP(self.node[1], [self.connection[1], self.connection[2]]))  # STS_input-IPLV, IPLM-IPLV
        # Last output spikes of each population.
        self.out_IPLM = torch.zeros((self.connection[0].weight.shape[1]), dtype=torch.float)
        self.out_IPLV = torch.zeros((self.connection[1].weight.shape[1]), dtype=torch.float)

    def forward(self, input1, input2):  # input from vPMC and STS
        """
        Compute the IPLv output and the weight update for the plastic links.

        :param input1: input spikes from vPMC
        :param input2: input spikes from STS
        :return: (IPLv output spikes, selected weight-update tensor)
        """
        # IPLm is driven directly through connection[0]; note that
        # learning_rule[0] is constructed in __init__ but not used here.
        self.out_IPLM = self.node[0](self.connection[0](input1))
        self.out_IPLV, dw_IPLv = self.learning_rule[1](input2, self.out_IPLM)
        if sum(sum(self.out_IPLV)) == 1:
            # Exactly one IPLv neuron fired: scale the IPLm->IPLv update by an
            # entry of the STS->IPLv update at the firing neuron's index.
            # NOTE(review): intent inferred from the indexing only — confirm
            # against the MutliInputSTDP return convention.
            dw_IPLv = dw_IPLv[0][torch.nonzero(dw_IPLv[1])[0][1]][torch.nonzero(dw_IPLv[1])[0][1]] * dw_IPLv[1]
        else:
            dw_IPLv = dw_IPLv[0]
        return self.out_IPLV, dw_IPLv

    def UpdateWeight(self, i, dw):
        """
        Apply a weight update to connection ``i``.

        :param i: index of the connection to update
        :param dw: weight update
        :return: None
        """
        self.connection[i].update(dw)

    def reset(self):
        """
        Reset neuron state and learning-rule traces.

        :return: None
        """
        for i in range(self.num_subMB):
            self.node[i].n_reset()
        for i in range(len(self.learning_rule)):
            self.learning_rule[i].reset()

    def getweight(self):
        """
        Get the connections (with their weights) in IPL.

        :return: connection list
        """
        return self.connection
================================================
FILE: braincog/base/brainarea/Insula.py
================================================
import numpy as np
import torch,os,sys
from torch import nn
from torch.nn import Parameter
import abc
import math
from abc import ABC
import numpy as np
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
import matplotlib.pyplot as plt
from braincog.base.strategy.surrogate import *
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import random
from braincog.base.connection.CustomLinear import *
from braincog.base.node.node import *
from braincog.base.learningrule.STDP import *
class InsulaNet(nn.Module):
    """Insula network: one Izhikevich population driven by IPLv and STS."""

    def __init__(self, connection):
        """
        Set up the network structure of the Insula.

        :param connection: list of two links: [0] IPLv -> Insula, [1] STS -> Insula
        """
        super().__init__()
        # A single Insula population.
        self.num_subMB = 1
        self.node = [IzhNodeMU(threshold=30., a=0.02, b=0.2, c=-65., d=6., mem=-70.)
                     for _ in range(self.num_subMB)]
        self.connection = connection
        # IPLv-Insula and STS-Insula merged into one multi-input STDP rule.
        self.learning_rule = [MutliInputSTDP(self.node[0],
                                             [self.connection[0], self.connection[1]])]
        self.Insula = torch.zeros((self.connection[1].weight.shape[1]), dtype=torch.float)

    def forward(self, input1, input2):
        """
        Compute the Insula output spikes.

        :param input1: input from IPLv
        :param input2: input from STS
        :return: output spikes of the Insula population
        """
        self.out_Insula, dw_Insula = self.learning_rule[0](input1, input2)
        return self.out_Insula

    def UpdateWeight(self, i, dw):
        """
        Apply a weight update to connection ``i``.

        :param i: index of the connection to update
        :param dw: weight update
        :return: None
        """
        self.connection[i].update(dw)

    def reset(self):
        """
        Reset neuron state and learning-rule traces.

        :return: None
        """
        for node in self.node:
            node.n_reset()
        for rule in self.learning_rule:
            rule.reset()

    def getweight(self):
        """
        Get the connections (with their weights) in the Insula.

        :return: connection list
        """
        return self.connection
================================================
FILE: braincog/base/brainarea/PFC.py
================================================
import torch
from torch import nn
from braincog.base.brainarea import BrainArea
from braincog.model_zoo.base_module import BaseLinearModule, BaseModule
class PFC:
    """Prefrontal cortex placeholder: passes its input through unchanged."""

    def __init__(self):
        """No state to initialize."""
        super().__init__()

    def forward(self, x):
        """
        Identity forward pass.

        :param x: any input
        :return: ``x`` unchanged
        """
        return x

    def reset(self):
        """Nothing to reset — the class holds no state."""
        return None
class dlPFC(BaseModule, PFC):
    """
    Dorsolateral PFC modelled as a spiking linear layer with an
    eligibility trace.
    """

    def __init__(self,
                 step,
                 encode_type,
                 in_features: int,
                 out_features: int,
                 bias,
                 *args,
                 **kwargs):
        super().__init__(step, encode_type, *args, **kwargs)
        self.bias = bias
        self.in_features = in_features
        self.out_features = out_features
        self.fc = self._create_fc()
        self.c = self._rest_c()

    def _rest_c(self):
        """Return a freshly randomized eligibility trace of shape (out, in)."""
        return torch.rand((self.out_features, self.in_features))

    def _create_fc(self):
        """
        Build the linear connection of the SNN layer.

        @return: nn.Linear
        """
        return nn.Linear(in_features=self.in_features,
                         out_features=self.out_features,
                         bias=self.bias)
================================================
FILE: braincog/base/brainarea/__init__.py
================================================
from .basalganglia import basalganglia
from .BrainArea import BrainArea, ThreePointForward, Feedback, TwoInOneOut, SelfConnectionArea
from .Insula import InsulaNet
from .IPL import IPLNet
from .PFC import PFC, dlPFC
__all__ = [
'basalganglia',
'BrainArea', 'ThreePointForward', 'Feedback', 'TwoInOneOut', 'SelfConnectionArea',
'InsulaNet',
'IPLNet',
'PFC', 'dlPFC'
]
================================================
FILE: braincog/base/brainarea/basalganglia.py
================================================
import numpy as np
import torch
import os
import sys
from torch import nn
from torch.nn import Parameter
import abc
import math
from abc import ABC
import numpy as np
import torch
import torch.nn.functional as F
from braincog.base.strategy.surrogate import *
from braincog.base.node.node import IFNode, SimHHNode
from braincog.base.learningrule.STDP import STDP, MutliInputSTDP
from braincog.base.connection.CustomLinear import CustomLinear
class basalganglia(nn.Module):
    """
    Basal ganglia network built from five spiking populations
    (StrD1, StrD2, STN, GPe, GPi) connected through the direct
    (StrD1->GPi), indirect (StrD2->GPe->GPi) and hyper-direct
    (cortex->STN->GPi/GPe) pathways.
    """

    def __init__(self, ns, na, we, wi, node_type="lif"):
        """
        :param ns: number of states
        :param na: number of actions
        :param we: excitatory connection weight
        :param wi: inhibitory connection weight
        :param node_type: neuron model, "hh" (Hodgkin-Huxley) or "lif"
            (IF neurons); defaults to "lif" for backward compatibility
            with callers that omit it
        """
        super().__init__()
        num_state = ns
        num_action = na
        num_STN = 2
        weight_exc = we
        weight_inh = wi
        # connections: 0 DLPFC-StrD1, 1 DLPFC-StrD2, 2 DLPFC-STN, 3 StrD1-GPi,
        # 4 StrD2-GPe, 5 GPe-GPi, 6 STN-GPi, 7 STN-GPe, 8 GPe-STN
        bg_connection = []
        bg_con_mask = []
        # DLPFC-StrD1: each state projects to its (state, action) striatal columns.
        con_matrix1 = torch.zeros((num_state, num_state * num_action), dtype=torch.float)
        for i in range(num_state):
            for j in range(num_action):
                con_matrix1[i, i * num_action + j] = 1
        bg_con_mask.append(con_matrix1)
        bg_connection.append(CustomLinear(weight_exc * con_matrix1, con_matrix1))
        # DLPFC-StrD2 shares the same topology.
        bg_connection.append(CustomLinear(weight_exc * con_matrix1, con_matrix1))
        bg_con_mask.append(con_matrix1)
        # DLPFC-STN: all-to-all.
        con_matrix3 = torch.ones((num_state, num_STN), dtype=torch.float)
        bg_con_mask.append(con_matrix3)
        bg_connection.append(CustomLinear(weight_exc * con_matrix3, con_matrix3))
        # StrD1-GPi: collapse (state, action) columns onto their action.
        con_matrix4 = torch.zeros((num_state * num_action, num_action), dtype=torch.float)
        for i in range(num_state):
            for j in range(num_action):
                con_matrix4[i * num_action + j, j] = 1
        bg_con_mask.append(con_matrix4)
        bg_connection.append(CustomLinear(weight_inh * con_matrix4, con_matrix4))
        # StrD2-GPe.
        bg_con_mask.append(con_matrix4)
        bg_connection.append(CustomLinear(weight_inh * con_matrix4, con_matrix4))
        # GPe-GPi: one-to-one inhibition.
        con_matrix5 = torch.eye((num_action), dtype=torch.float)
        bg_con_mask.append(con_matrix5)
        bg_connection.append(CustomLinear(weight_inh * con_matrix5, con_matrix5))
        # STN-GPi: half-strength excitation.
        con_matrix6 = torch.ones((num_STN, num_action), dtype=torch.float)
        bg_con_mask.append(con_matrix6)
        bg_connection.append(CustomLinear(0.5 * weight_exc * con_matrix6, con_matrix6))
        # STN-GPe.
        bg_con_mask.append(con_matrix6)
        bg_connection.append(CustomLinear(0.5 * weight_exc * con_matrix6, con_matrix6))
        # GPe-STN: half-strength inhibition.
        con_matrix7 = torch.ones((num_action, num_STN), dtype=torch.float)
        bg_con_mask.append(con_matrix7)
        bg_connection.append(CustomLinear(0.5 * weight_inh * con_matrix7, con_matrix7))
        self.num_subBG = 5
        self.node_type = node_type
        if self.node_type == "hh":
            self.node = [SimHHNode() for i in range(self.num_subBG)]
        if self.node_type == "lif":
            self.node = [IFNode() for i in range(self.num_subBG)]
        self.connection = bg_connection
        self.mask = bg_con_mask
        self.learning_rule = []
        trace_stdp = 0.99
        self.learning_rule.append(STDP(self.node[0], self.connection[0], trace_stdp))  # DLPFC-StrD1
        self.learning_rule.append(STDP(self.node[1], self.connection[1], trace_stdp))  # DLPFC-StrD2
        self.learning_rule.append(MutliInputSTDP(self.node[2], [self.connection[2], self.connection[8]]))  # DLPFC-STN, GPe-STN
        self.learning_rule.append(MutliInputSTDP(self.node[3], [self.connection[4], self.connection[7]]))  # StrD2-GPe, STN-GPe
        self.learning_rule.append(MutliInputSTDP(self.node[4], [self.connection[3], self.connection[5], self.connection[6]]))  # StrD1-GPi, GPe-GPi, STN-GPi
        # Last output spikes of every population.
        self.out_StrD1 = torch.zeros((self.connection[0].weight.shape[1]), dtype=torch.float)
        self.out_StrD2 = torch.zeros((self.connection[1].weight.shape[1]), dtype=torch.float)
        self.out_STN = torch.zeros((self.connection[2].weight.shape[1]), dtype=torch.float)
        self.out_Gpi = torch.zeros((self.connection[3].weight.shape[1]), dtype=torch.float)
        self.out_Gpe = torch.zeros((self.connection[4].weight.shape[1]), dtype=torch.float)

    def forward(self, input):
        """
        Compute the basal ganglia output for the current input.

        :param input: input current (from DLPFC)
        :return: output spikes of GPi
        """
        self.out_StrD1, dw_strd1 = self.learning_rule[0](input)
        self.out_StrD2, dw_strd2 = self.learning_rule[1](input)
        self.out_STN, dw_stn = self.learning_rule[2](input, self.out_Gpe)
        self.out_Gpe, dw_gpe = self.learning_rule[3](self.out_StrD2, self.out_STN)
        self.out_Gpi, dw_gpi = self.learning_rule[4](self.out_StrD1, self.out_Gpe, self.out_STN)
        return self.out_Gpi

    def UpdateWeight(self, i, dw):
        """
        Update connection ``i`` by ``dw`` and L1-normalize its rows.

        :param i: index of the connection to update
        :param dw: weight update
        :return: None
        """
        self.connection[i].update(dw)
        self.connection[i].weight.data = F.normalize(self.connection[i].weight.data.float(), p=1, dim=1)

    def reset(self):
        """
        Reset neuron state and learning-rule traces.

        :return: None
        """
        # BUG FIX: previously iterated over the non-existent attribute
        # ``self.num_subMB`` (copied from another brain area), which raised
        # AttributeError; the population count here is ``num_subBG``.
        for i in range(self.num_subBG):
            self.node[i].n_reset()
        for i in range(len(self.learning_rule)):
            self.learning_rule[i].reset()

    def getweight(self):
        """
        Get the connections of the basal ganglia (including weights).

        :return: list of CustomLinear connections
        """
        return self.connection

    def getmask(self):
        """
        Get the raw connection (mask) matrices of the basal ganglia.

        :return: list of 0/1 connectivity matrices
        """
        return self.mask
if __name__ == "__main__":
    # BUG FIX: basalganglia's __init__ requires a node_type argument
    # ("hh" or "lif"); the original call omitted it and raised TypeError.
    BG = basalganglia(4, 2, 0.2, -4, "lif")
    con = BG.getweight()
    print(con)
================================================
FILE: braincog/base/brainarea/dACC.py
================================================
import torch
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(threshold=np.inf)
from utils.one_hot import *
import os
import time
import sys
from tqdm import tqdm
from braincog.base.encoder.population_coding import *
from braincog.model_zoo.base_module import BaseLinearModule, BaseModule
from braincog.base.learningrule.STDP import *
import sys
sys.path.append("..")
class dACC(BaseModule):
    """
    Dorsal anterior cingulate cortex modelled as a two-layer spiking linear
    network trained with STDP plus an eligibility trace.
    """

    def __init__(self,
                 step,
                 encode_type,
                 in_features: int,
                 out_features: int,
                 bias,
                 node,
                 *args,
                 **kwargs):
        """
        :param step: number of simulation time steps per sample
        :param encode_type: spike-encoding scheme passed to BaseModule
        :param in_features: input dimensionality
        :param out_features: output dimensionality
        :param bias: whether the linear connection has a bias
        :param node: neuron node class (e.g. LIFNode); instantiated twice
            with different thresholds for the two layers
        """
        super().__init__(step, encode_type, *args, **kwargs)
        self.bias = bias
        self.in_features = in_features
        self.out_features = out_features
        self.node1 = node(threshold=0.5, tau=2.)  # first-layer neurons
        self.node_name1 = node
        self.node2 = node(threshold=0.1, tau=2.)  # second-layer neurons
        self.node_name2 = node
        self.fc = self._create_fc()
        self.c = self._rest_c()

    def _rest_c(self):
        """Return a freshly randomized eligibility trace of shape (out, in)."""
        c = torch.rand((self.out_features, self.in_features))  # eligibility trace
        return c

    def _create_fc(self):
        """
        the connection of the SNN linear
        @return: nn.Linear
        """
        fc = nn.Linear(in_features=self.in_features,
                       out_features=self.out_features, bias=self.bias)
        return fc

    def update_c(self, c, STDP, tau_c=0.2):
        """
        update the trace of eligibility
        @param c: a tensor to record eligibility
        @param STDP: the results of STDP
        @param tau_c: the parameter of trace decay
        @return: a update tensor to record eligibility
        Equation:
            delta_c = (-(c / tau_c) + STDP) * dela_t
            c = c + delta_c
        reference:<Solving the Distal Reward Problem through ...>
        NOTE(review): the code applies only ``c += tau_c * STDP`` — the decay
        term of the equation above is not implemented here; confirm intent.
        """
        c = c + tau_c * STDP
        return c

    def forward(self, inputs, epoch):
        """
        decision
        @param inputs: state, indexed as ``inputs[i, :]``
        @param epoch: unused in this method; kept for interface compatibility
        @return: action — a list with one value per (sample, step): the
            minimum of the second layer's output spikes at that step
        """
        output = []
        # NOTE(review): a new STDP rule is created on every forward call and
        # the eligibility trace is re-randomized, so plasticity state does not
        # persist across calls — confirm this is intended.
        stdp = STDP(self.node2, self.fc, decay=0.80)
        self.c = self._rest_c()
        # stdp.connection.weight.data = torch.rand((self.out_features, self.in_features))
        for i in range(inputs.shape[0]):
            for t in range(self.step):
                l1_in = torch.tensor(inputs[i, :])
                l1_out = self.node1(l1_in).unsqueeze(0)  # pre : l1_out
                l2_out, dw = stdp(l1_out)  # dw -- STDP
                self.c = self.update_c(self.c, dw[0])
                output.append(torch.min(l2_out))
                # output.append((l2_out.any() == 0).cpu().detach().numpy().tolist())
        return output
# if __name__ == '__main__':
# np.random.seed(6)
# T = 5
# num_popneurons = 2
# safety = 2
# epoch = 50
# file_name = "/home/zhaozhuoya/braincog/examples/ToM/data/injury_value.txt"
# state = []
# with open(file_name) as f:
# data = []
# data_split = f.readlines() #
# for i in data_split:
# state.append(one_hot(int(i[0])))
#
# output = np.array(state)
# train_y = output
# test_y = output[79:82]#output[12].reshape(1,2)
#
# file_name = "/home/zhaozhuoya/braincog/examples/ToM/data/injury_memory.txt"
# state = []
# with open(file_name) as f:
# data_split = f.readlines()
# for i in data_split:
# data = []
# data.append(int(bool(abs(int(i[2]) - int(i[18]))))*10)
# data.append(int(bool(abs(int(i[5]) - int(i[21]))))*10)
# state.append(data)
# input = np.array(state)
# train_x = input
# test_x = input[79:82]
# dACC_net = dACC(step=T, encode_type='rate', bias=True,
# in_features=num_popneurons, out_features=safety,
# node=node.LIFNode)
# dACC_net.fc.weight.data = torch.rand((safety, num_popneurons))
# dACC_net.load_state_dict(torch.load('./checkpoint/dACC_net.pth')['dacc'])
# output = dACC_net(inputs=train_x, epoch=50)
# for i in range(len(output)):
# print(output[i], train_x[i])
# torch.save({'dacc': dACC_net.state_dict()}, os.path.join('./checkpoint', 'dACC_net.pth'))
# dACC_net.load_state_dict(torch.load('./checkpoint/dACC_net.pth')['dacc'])
# output = dACC_net(inputs=test_x, epoch=50)
# for i in range(len(test_x)):
#
# print(output[i],test_x[i])
================================================
FILE: braincog/base/connection/CustomLinear.py
================================================
import os
import sys
import numpy as np
import torch
from torch import nn
from torch import einsum
import torch.nn.functional as F
class CustomLinear(nn.Module):
    """
    User-defined linear connection, typically used for STDP computation.

    :param weight: weight matrix, registered as a trainable Parameter
    :param mask: optional 0/1 connectivity mask applied to weight updates
    """

    def __init__(self, weight, mask=None):
        super().__init__()
        self.weight = nn.Parameter(weight, requires_grad=True)
        self.mask = mask

    def forward(self, x: torch.Tensor):
        """
        :param x: input, x.shape = [N]
        :return: x @ weight, shape [C]
        """
        return x.matmul(self.weight)

    def update(self, dw):
        """
        Apply a weight update in place (outside autograd).

        :param dw: weight update; masked by ``self.mask`` when present
        """
        with torch.no_grad():
            if self.mask is not None:
                # BUG FIX: use an out-of-place multiply — the previous
                # ``dw *= self.mask`` silently mutated the caller's tensor.
                dw = dw * self.mask
            self.weight.data += dw
================================================
FILE: braincog/base/connection/__init__.py
================================================
from .CustomLinear import CustomLinear
from .layer import VotingLayer, WTALayer, NDropout, ThresholdDependentBatchNorm2d, LayerNorm, SMaxPool, LIPool
__all__ = [
'CustomLinear',
'VotingLayer', 'WTALayer', 'NDropout', 'ThresholdDependentBatchNorm2d', 'LayerNorm', 'SMaxPool', 'LIPool'
]
================================================
FILE: braincog/base/connection/layer.py
================================================
import warnings
import math
import numpy as np
import torch
from torch import nn
from torch import einsum
from torch.nn.modules.batchnorm import _BatchNorm
import torch.nn.functional as F
from torch.nn import Parameter
from einops import rearrange
class VotingLayer(nn.Module):
    """
    SNN output layer: each class is represented by a group of neurons whose
    outputs are averaged ("voting").

    :param voter_num: number of neurons per class, e.g. ``voter_num = 10``
        averages every consecutive group of 10 neurons
    """

    def __init__(self, voter_num: int):
        super().__init__()
        self.voting = nn.AvgPool1d(voter_num, voter_num)

    def forward(self, x: torch.Tensor):
        # x: [N, voter_num * C] -> [N, C]
        pooled = self.voting(x.unsqueeze(1))
        return pooled.squeeze(1)
class WTALayer(nn.Module):
    """
    Winner-take-all layer: keeps only ``k`` winners per sample along the
    channel dimension, with random scores breaking ties.

    :param k: number of outputs to keep (default 1)
    """

    def __init__(self, k=1):
        super().__init__()
        self.k = k

    def forward(self, x: torch.Tensor):
        # x: [N, C, W, H] -> same shape with losing channels zeroed.
        scores = x * torch.rand(x.shape, device=x.device)
        if self.k > 1:
            kth = scores.topk(self.k, dim=1)[0][:, -1:]
            keep = (scores >= kth).float()
        else:
            keep = (scores >= scores.max(1, True)[0]).float()
        return x * keep
class NDropout(nn.Module):
    """
    Dropout variant that reuses the same mask across all time steps of one
    sample; call ``n_reset`` to draw a fresh mask.
    """

    def __init__(self, p):
        super(NDropout, self).__init__()
        self.p = p
        self.mask = None

    def n_reset(self):
        """Discard the cached mask so the next forward draws a new one."""
        self.mask = None

    def create_mask(self, x):
        """
        Draw and cache a dropout mask shaped like ``x``.

        :param x: input tensor whose shape the mask copies
        """
        self.mask = F.dropout(torch.ones_like(x.data), self.p, training=True)

    def forward(self, x):
        # Identity at evaluation time; cached mask at training time.
        if not self.training:
            return x
        if self.mask is None:
            self.create_mask(x)
        return self.mask * x
class WSConv2d(nn.Conv2d):
    """
    Weight-standardized 2-D convolution: each filter is mean-centered and
    divided by its standard deviation before convolving, with an optional
    learnable per-filter gain.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, gain=True):
        super(WSConv2d, self).__init__(in_channels, out_channels, kernel_size, stride,
                                       padding, dilation, groups, bias)
        if gain:
            self.gain = nn.Parameter(torch.ones(self.out_channels, 1, 1, 1))
        else:
            self.gain = 1.

    def forward(self, x):
        # Center each filter around zero ...
        mean = self.weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True)
        centered = self.weight - mean
        # ... then scale by its standard deviation (epsilon avoids div-by-zero).
        std = centered.view(centered.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5
        normalized = self.gain * centered / std.expand_as(centered)
        return F.conv2d(x, normalized, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class ThresholdDependentBatchNorm2d(_BatchNorm):
    """
    tdBN
    https://ojs.aaai.org/index.php/AAAI/article/view/17320

    Standard BatchNorm2d whose affine scale is initialized to
    ``alpha * threshold``.
    """

    def __init__(self, num_features, alpha: float, threshold: float = .5, layer_by_layer: bool = True, affine: bool = True, **kwargs):
        # Stash hyper-parameters before _BatchNorm.__init__ builds the buffers.
        self.alpha = alpha
        self.threshold = threshold
        super().__init__(num_features=num_features, affine=affine)
        assert layer_by_layer, \
            'tdBN may works in step-by-step mode, which will not take temporal dimension into batch norm'
        assert self.affine, 'ThresholdDependentBatchNorm needs to set `affine = True`!'
        torch.nn.init.constant_(self.weight, alpha * threshold)

    def _check_input_dim(self, input):
        # BatchNorm2d contract: inputs must be [N, C, H, W].
        if input.dim() != 4:
            raise ValueError("expected 4D input (got {}D input)".format(input.dim()))

    def forward(self, input):
        # Temporal folding into the batch dimension is the caller's job.
        return super().forward(input)
# return rearrange(output, 'b (t c) w h -> (t b) c w h', t=self.step)
class TEBN(nn.Module):
    """
    Temporal Effective Batch Normalization.

    Unfolds the time dimension out of the batch, applies BatchNorm3d over
    channels, and rescales every time step with a learnable per-step
    weight ``p``.

    :param num_features: number of channels C
    :param step: number of time steps folded into the batch dimension
    :param eps, momentum: kept for interface compatibility (unused — the
        inner BatchNorm3d uses its defaults, matching the original code)
    """

    def __init__(self, num_features, step, eps=1e-5, momentum=0.1, **kwargs):
        super(TEBN, self).__init__()
        self.bn = nn.BatchNorm3d(num_features)
        # BUG FIX: the per-step scale was hard-coded to 4 steps
        # (torch.ones(4, 1, 1, 1, 1)), which broke broadcasting for any
        # other ``step``; it must match the configured step count.
        self.p = nn.Parameter(torch.ones(step, 1, 1, 1, 1))
        self.step = step

    def forward(self, input):
        """
        :param input: tensor of shape [(step * batch), C, W, H]
        :return: normalized tensor of the same shape
        """
        tb, c, w, h = input.shape
        b = tb // self.step
        # (t b) c w h -> t c b w h so BatchNorm3d normalizes over channels
        # with (b, w, h) as the spatial volume.
        y = input.reshape(self.step, b, c, w, h).permute(0, 2, 1, 3, 4)
        y = self.bn(y)
        # t c b w h -> t b c w h, then apply the per-step scale.
        y = y.permute(0, 2, 1, 3, 4) * self.p
        # t b c w h -> (t b) c w h
        return y.reshape(tb, c, w, h)
class LayerNorm(nn.Module):
    """LayerNorm supporting two data layouts.

    ``channels_last`` expects inputs of shape (batch, height, width, channels)
    and delegates to ``F.layer_norm``; ``channels_first`` expects
    (batch, channels, height, width) and normalizes over dim 1 manually.
    """

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if data_format not in ("channels_last", "channels_first"):
            raise NotImplementedError
        self.normalized_shape = (normalized_shape,)

    def forward(self, x):
        if self.data_format == "channels_last":
            return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        # channels_first: normalize across the channel dimension by hand.
        mean = x.mean(1, keepdim=True)
        var = (x - mean).pow(2).mean(1, keepdim=True)
        normed = (x - mean) / torch.sqrt(var + self.eps)
        return self.weight[:, None, None] * normed + self.bias[:, None, None]
class SMaxPool(nn.Module):
    """Approximate max-pool replacement for ANN-to-SNN conversion.

    Passes the spike of the neuron with the highest accumulated firing
    rate, which satisfies the needs of an ordinary max-pooling layer.

    Reference:
        https://arxiv.org/abs/1612.04052
    """

    def __init__(self, child):
        super(SMaxPool, self).__init__()
        self.opration = child
        self.sumspike = 0

    def forward(self, x):
        self.sumspike = self.sumspike + x
        # Boost the accumulator so it dominates the instantaneous spike;
        # the difference of the two pooled values isolates the current
        # step's spike at the highest-rate location.
        boosted = self.sumspike * 1000
        return self.opration(x + boosted) - self.opration(boosted)

    def reset(self):
        self.sumspike = 0
class LIPool(nn.Module):
    r"""Exact max-pool replacement for ANN-to-SNN conversion (LIPooling).

    A lateral-inhibition mechanism subtracts the pooled output back from
    the accumulated input so the converted SNN's maximum matches the
    expected ANN value.

    Reference:
        https://arxiv.org/abs/2204.13271
    """

    def __init__(self, child=None):
        super(LIPool, self).__init__()
        if child is None:
            raise NotImplementedError("child should be Pooling operation with torch.")
        self.opration = child
        self.sumspike = 0

    def forward(self, x):
        self.sumspike = self.sumspike + x
        pooled = self.opration(self.sumspike)
        # Lateral inhibition: upsample the pooled maxima back to the input
        # resolution (assumes a stride-2 window — TODO confirm) and subtract.
        self.sumspike = self.sumspike - F.interpolate(pooled, scale_factor=2, mode='nearest')
        return pooled

    def reset(self):
        self.sumspike = 0
class CustomLinear(nn.Module):
    """Linear layer whose effective weight is constrained lower-triangular.

    The weight starts as the identity; a fixed lower-triangular mask is
    re-applied at every forward pass so only entries on or below the main
    diagonal ever contribute, even after optimizer updates.
    """

    def __init__(self, in_channels, out_channels, bias=True):
        super(CustomLinear, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Identity initialization of an (in, in) weight.
        self.weight = Parameter(torch.diag(torch.ones(self.in_channels)), requires_grad=True)
        self.register_buffer(
            'mask',
            torch.tril(torch.ones(self.in_channels, self.in_channels), diagonal=0))
        if bias:
            self.bias = Parameter(torch.zeros(out_channels), requires_grad=True)
        else:
            self.register_parameter('bias', None)

    def forward(self, inputs):
        # Mask on every call so the triangular constraint is never violated.
        return F.linear(inputs, self.mask * self.weight, self.bias)
================================================
FILE: braincog/base/conversion/__init__.py
================================================
from .convertor import HookScale, Hookoutput, Scale, Convertor, SNode
from .merge import mergeConvBN, merge
__all__ = [
'Hookoutput', 'HookScale', 'Scale', 'Convertor', 'SNode',
'merge', 'mergeConvBN'
]
================================================
FILE: braincog/base/conversion/convertor.py
================================================
import torch
import torch.nn as nn
from braincog.base.connection.layer import SMaxPool, LIPool
from .merge import mergeConvBN
from .spicalib import SpiCalib
import types
class HookScale(nn.Module):
    """Records the p-th-percentile maximum activation after each ReLU.

    For channelnorm the per-channel maximum uses ``torch.quantile``; for
    layer-wise normalization the percentile is taken manually from a full
    sort, because ``quantile`` has a size limit that large batches exceed.

    :param p: percentile of the activation distribution to record
    :param channelnorm: record one scale per channel instead of per layer
    :param gamma: activations are clipped at gamma before recording
    """

    def __init__(self,
                 p: float = 0.9995,
                 channelnorm: bool = False,
                 gamma: float = 0.999,
                 ):
        super().__init__()
        # Fix: both branches of the former `if channelnorm:` registered the
        # identical scalar buffer, so the conditional was dead code.
        self.register_buffer('scale', torch.tensor(0.0))
        self.p = p
        self.channelnorm = channelnorm
        self.gamma = gamma

    def forward(self, x):
        # Clip activations at gamma (burst-spike ceiling) before recording.
        x = torch.where(x.detach() < self.gamma, x.detach(),
                        torch.tensor(self.gamma, dtype=x.dtype, device=x.device))
        if len(x.shape) == 4 and self.channelnorm:
            num_channel = x.shape[1]
            # Per-channel p-th percentile; +1e-10 avoids a zero scale.
            tmp = torch.quantile(x.permute(1, 0, 2, 3).reshape(num_channel, -1), self.p, dim=1,
                                 interpolation='lower') + 1e-10
            self.scale = torch.max(tmp, self.scale)
        else:
            sort, _ = torch.sort(x.view(-1))
            self.scale = torch.max(sort[int(sort.shape[0] * self.p) - 1], self.scale)
        return x
class Hookoutput(nn.Module):
    """Wrapper that caches the detached output of a module.

    Used in pseudo-conversion to monitor what ReLU / ClipQuan layers emit.
    """

    def __init__(self, module):
        super(Hookoutput, self).__init__()
        self.activation = 0.
        self.operation = module

    def forward(self, x):
        out = self.operation(x)
        # Keep a gradient-free copy for later inspection.
        self.activation = out.detach()
        return out
class Scale(nn.Module):
    """Scales the forward pass by a fixed factor.

    :param scale: scalar or 1-D (per-channel) tensor; plain Python numbers
        are accepted and converted to a tensor.
    """

    def __init__(self, scale=1.0):
        super().__init__()
        # Fix: register_buffer requires a Tensor; the previous code passed
        # the raw float through, so Scale() / Scale(0.5) raised TypeError.
        self.register_buffer('scale', torch.as_tensor(scale))

    def forward(self, x):
        if len(self.scale.shape) == 1:
            # Per-channel scale: broadcast over (N, C, H, W).
            return self.scale.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x
        else:
            return self.scale * x
def reset(self):
    """Reset all spiking state in a converted network.

    The converted model originates from an ANN, so the spiking modules
    attached during conversion carry state that must be cleared between
    inputs. Recurse through the children; reset SNode/LIPool/SMaxPool
    instances directly, descend into everything else.
    """
    for name, child in self.named_children():
        if isinstance(child, (SNode, LIPool, SMaxPool)):
            child.reset()
        else:
            reset(child)
class Convertor(nn.Module):
    """ANN-to-SNN convertor.

    Converts a complete pytorch model, using part of the data from
    ``dataloader`` to estimate activation maxima; ``p`` selects the p-th
    percentile of the activations as the scale.

    channelnorm: https://arxiv.org/abs/1903.06530
        per-channel maxima and weight normalization
    gamma: https://arxiv.org/abs/2204.13271
        number of burst spikes; bursting raises firing capacity and
        reduces residual information
    lipool: https://arxiv.org/abs/2204.13271
        lateral-inhibition max pooling (LIPooling) for exact conversion
    soft_mode: https://arxiv.org/abs/1612.04052
        soft reset, which greatly reduces information loss at reset
    merge: whether to fuse adjacent Conv and BN layers
    batch_num: how many dataloader batches to use for calibration
    """

    def __init__(self,
                 dataloader,
                 device=None,
                 p=0.9995,
                 channelnorm=False,
                 lipool=True,
                 gamma=1,
                 soft_mode=True,
                 merge=True,
                 batch_num=1,
                 spicalib=0
                 ):
        super(Convertor, self).__init__()
        self.dataloader = dataloader
        self.device = device
        self.p = p
        self.channelnorm = channelnorm
        self.lipool = lipool
        self.gamma = gamma
        self.soft_mode = soft_mode
        self.merge = merge
        self.batch_num = batch_num
        self.spicalib = spicalib

    def forward(self, model):
        """Run the full conversion pipeline on `model`; returns the SNN."""
        model.eval()
        model = Convertor.register_hook(model, self.p, self.channelnorm, self.gamma)
        model = Convertor.get_percentile(model, self.dataloader, self.device, batch_num=self.batch_num)
        model = mergeConvBN(model) if self.merge else model
        model = Convertor.replace_for_spike(model, self.lipool, self.soft_mode, self.gamma, self.spicalib)
        # Attach the module-level `reset` so callers can clear spiking state.
        model.reset = types.MethodType(reset, model)
        return model

    @staticmethod
    def register_hook(model, p=0.99, channelnorm=False, gamma=0.999):
        """Reference: https://github.com/fangwei123456/spikingjelly

        Appends a HookScale module after every ReLU. At simulation time this
        is equivalent to weight normalization and extends to arbitrary
        network structures.
        """
        for name, child in list(model.named_children()):
            if isinstance(child, nn.ReLU):
                model._modules[name] = nn.Sequential(nn.ReLU(), HookScale(p, channelnorm, gamma))
            else:
                Convertor.register_hook(child, p, channelnorm, gamma)
        return model

    @staticmethod
    def get_percentile(model, dataloader, device, batch_num=1):
        """Feed `batch_num` batches through a HookScale-instrumented model."""
        for idx, (data, _) in enumerate(dataloader):
            data = data.to(device)
            if idx >= batch_num:
                break
            model(data)
        return model

    @staticmethod
    def replace_for_spike(model, lipool=True, soft_mode=True, gamma=1, spicalib=0):
        """Turn the calibrated ANN into an SNN.

        ReLU units become spiking neurons (with scale/calibration wrappers);
        max-pool layers become LIPool (exact) or SMaxPool (approximate)
        depending on `lipool`.
        """
        for name, child in list(model.named_children()):
            if isinstance(child, nn.Sequential) and len(child) == 2 \
                    and isinstance(child[0], nn.ReLU) and isinstance(child[1], HookScale):
                model._modules[name] = nn.Sequential(
                    Scale(1.0 / child[1].scale),
                    SNode(soft_mode, gamma),
                    SpiCalib(spicalib),
                    Scale(child[1].scale)
                )
            elif isinstance(child, nn.MaxPool2d):
                model._modules[name] = LIPool(child) if lipool else SMaxPool(child)
            else:
                # Fix: the recursive call previously dropped `spicalib`,
                # so nested submodules were always converted with spicalib=0.
                Convertor.replace_for_spike(child, lipool, soft_mode, gamma, spicalib)
        return model
class SNode(nn.Module):
    """Spiking neuron model for the converted SNN.

    ``gamma == 1`` gives an IF neuron; ``gamma > 1`` enables burst firing
    (up to gamma spikes per step). ``soft_mode`` selects the reset method:
    soft reset subtracts the fired threshold and loses far less
    information than hard reset.
    """

    def __init__(self, soft_mode=False, gamma=5):
        super(SNode, self).__init__()
        self.threshold = 1.0
        self.soft_mode = soft_mode
        self.gamma = gamma
        self.mem = 0
        self.spike = 0

    def forward(self, x):
        self.mem = self.mem + x
        # Number of spikes this step, capped at gamma (burst firing).
        self.spike = (self.mem / self.threshold).floor().clamp(min=0, max=self.gamma)
        # Fix: the hard branch previously evaluated `self.hard_reset`
        # without calling it, so hard reset never actually ran.
        if self.soft_mode:
            self.soft_reset()
        else:
            self.hard_reset()
        return self.spike

    def hard_reset(self):
        """Hard reset: membrane potential is cleared after firing."""
        self.mem = self.mem * (1 - self.spike.detach())

    def soft_reset(self):
        """Soft reset: subtract threshold times the number of spikes."""
        self.mem = self.mem - self.threshold * self.spike.detach()

    def reset(self):
        self.mem = 0
        self.spike = 0
================================================
FILE: braincog/base/conversion/merge.py
================================================
import torch
import torch.nn as nn
def mergeConvBN(m):
    """Recursively fuse each Conv2d with the BatchNorm2d that follows it.

    The conv is replaced by the fused conv and the BN by an Identity, so
    the module structure (and child names) stays intact.
    """
    c, cn = None, None  # last unfused Conv2d and its name
    for name, child in list(m.named_children()):
        if isinstance(child, nn.BatchNorm2d):
            # Fix: guard against a BN with no preceding Conv2d — the old
            # code called merge(None, bn) and crashed.
            if c is not None:
                m._modules[cn] = merge(c, child)
                m._modules[name] = torch.nn.Identity()
                c = None
        elif isinstance(child, nn.Conv2d):
            c = child
            cn = name
        else:
            mergeConvBN(child)
    return m
def merge(conv, bn):
    """Fuse a Conv2d and its following BatchNorm2d into a single Conv2d.

    :param conv: Conv2d instance
    :param bn: BatchNorm2d instance (its running stats are folded in)
    :return: a new Conv2d whose output equals ``bn(conv(x))`` in eval mode
    """
    mean = bn.running_mean
    var_sqrt = torch.sqrt(bn.running_var + bn.eps)
    gamma, beta = bn.weight, bn.bias  # BN affine scale and shift
    w = conv.weight * (gamma / var_sqrt).reshape([conv.out_channels, 1, 1, 1])
    b = conv.bias if conv.bias is not None else mean.new_zeros(mean.shape)
    b = (b - mean) / var_sqrt * gamma + beta
    # Fix: propagate dilation/groups/padding_mode, which were previously
    # dropped and silently broke grouped or dilated convolutions.
    fused_conv = nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,
                           stride=conv.stride, padding=conv.padding,
                           dilation=conv.dilation, groups=conv.groups,
                           padding_mode=conv.padding_mode, bias=True)
    fused_conv.weight = nn.Parameter(w)
    fused_conv.bias = nn.Parameter(b)
    return fused_conv
================================================
FILE: braincog/base/conversion/spicalib.py
================================================
import torch
import torch.nn as nn
class SpiCalib(nn.Module):
    """Spike calibration layer for the conversion pipeline.

    Tracks, per neuron, the average inter-spike interval; when a neuron has
    been silent for longer than its average interval plus ``allowance``,
    one spike is subtracted from the current output to cancel spurious
    activity. ``allowance == 0`` disables calibration entirely.
    """
    def __init__(self, allowance):
        super(SpiCalib, self).__init__()
        # Maximum tolerated silence beyond the average inter-spike interval.
        self.allowance = allowance
        # Running sum of (possibly corrected) outputs.
        self.sumspike = 0
        # Current simulation time step.
        self.t = 0
    def forward(self, x):
        # Calibration disabled: pure pass-through.
        if self.allowance == 0:
            return x
        if self.t == 0:
            # Lazily allocate per-neuron state on the first step so shape
            # and device follow the input.
            self.last_spike = torch.zeros_like(x)
            self.avg_time = torch.zeros_like(x)
            self.num_spike = torch.zeros_like(x)
        SPIKE_MASK = x > 0
        self.num_spike[SPIKE_MASK] += 1
        # Incremental mean of the inter-spike interval, updated only for
        # neurons that fired at this step.
        self.avg_time[SPIKE_MASK] = (self.t - self.last_spike + self.avg_time * (self.num_spike - 1))[SPIKE_MASK] / \
                                    self.num_spike[SPIKE_MASK]
        self.last_spike[SPIKE_MASK] = self.t
        # Neurons silent longer than average + allowance get one spike
        # removed (note: mutates the input tensor in place).
        SIN_MASK = self.t - self.last_spike > self.avg_time + self.allowance
        x[SIN_MASK] -= 1.0
        self.sumspike += x
        # Do not let the cumulative output dip below -1: zero the output.
        x[self.sumspike <= -1] = 0
        self.t += 1
        return x
    def reset(self):
        # NOTE(review): last_spike/avg_time/num_spike are re-created when
        # t returns to 0, so they need not be cleared here.
        self.sumspike = 0
        self.t = 0
================================================
FILE: braincog/base/encoder/__init__.py
================================================
from .encoder import Encoder
from .population_coding import PEncoder
from.qs_coding import QSEncoder
__all__ = [
'Encoder',
'PEncoder',
'QSEncoder'
]
================================================
FILE: braincog/base/encoder/encoder.py
================================================
import torch
import torch.nn as nn
from einops import rearrange, repeat
from braincog.base.strategy.surrogate import GateGrad
class AutoEncoder(nn.Module):
    """Learnable encoder mapping each static scalar to `step` time steps.

    Every input element is expanded through two linear layers into one
    value per time step, squashed to (0, 1); with `spike_output` the
    values are binarized by the surrogate-gradient spike function.
    """

    def __init__(self, step, spike_output=True):
        super(AutoEncoder, self).__init__()
        self.step = step
        self.spike_output = spike_output
        self.sigmoid = nn.Sigmoid()
        self.fc1 = nn.Linear(1, self.step)
        self.fc2 = nn.Linear(self.step, self.step)
        self.relu = nn.ReLU()
        self.act_fun = GateGrad()

    def forward(self, x):
        shape = x.shape
        hidden = self.relu(self.fc1(x.view(-1, 1)))
        # (N, step) -> (step, N), then squash to (0, 1).
        per_step = self.sigmoid(self.fc2(hidden).transpose_(1, 0))
        if not self.spike_output:
            return per_step.view(self.step, *shape)
        return self.act_fun(per_step).view(self.step, *shape)
# class TransEncoder(nn.Module):
# def __init__(self, step):
# super(TransEncoder, self).__init__()
# self.step = step
# self.trans = Transformer(dim=128, depth=3, heads=8, dim_head=, mlp_dim, dropout=0.)
class Encoder(nn.Module):
    '''
    Encode static images into spike trains.

    :param step: number of simulation time steps
    :param encode_type: encoding scheme; one of ``direct``, ``ttfs``,
        ``rate``, ``phase``
    :param temporal_flatten: concatenate the temporal dim into channels
    :param layer_by_layer: infer by computing every layer's full output
    Output layout is generally ``(step, batch_size, ...)``.
    '''

    def __init__(self, step, encode_type='ttfs', *args, **kwargs):
        super(Encoder, self).__init__()
        self.step = step
        # Dispatch to the encoding method selected by name.
        self.fun = getattr(self, encode_type)
        self.encode_type = encode_type
        self.temporal_flatten = kwargs.get('temporal_flatten', False)
        self.layer_by_layer = kwargs.get('layer_by_layer', False)
        self.no_encode = kwargs.get('adaptive_node', False)
        self.groups = kwargs.get('n_groups', 1)

    def forward(self, inputs, deletion_prob=None, shift_var=None):
        if len(inputs.shape) == 5:  # DVS data: (b, t, c, w, h) -> (t, b, c, w, h)
            outputs = inputs.permute(1, 0, 2, 3, 4).contiguous()
        elif len(inputs.shape) == 3:  # DAS data: (b, t, c) -> (t, b, c)
            outputs = inputs.permute(1, 0, 2).contiguous()
        else:
            if self.encode_type == 'auto':
                # NOTE(review): nn.Module defines no `.device` attribute;
                # this path only works if self.fun provides one — confirm
                # before enabling auto encoding.
                if self.fun.device != inputs.device:
                    self.fun.to(inputs.device)
            outputs = self.fun(inputs)

        if deletion_prob:
            outputs = self.delete(outputs, deletion_prob)
        if shift_var:
            outputs = self.shift(outputs, shift_var)

        if self.temporal_flatten or self.no_encode:
            outputs = rearrange(outputs, 't b c w h -> 1 b (t c) w h')
        elif self.groups != 1:
            outputs = rearrange(outputs, 't b c w h -> b (c t) w h')
        elif self.layer_by_layer:
            if len(inputs.shape) == 3:
                outputs = rearrange(outputs, 't b c-> (t b) c')
            else:
                outputs = rearrange(outputs, 't b c w h -> (t b) c w h')

        return outputs

    @torch.no_grad()
    def direct(self, inputs):
        """
        Direct encoding: repeat the static input at every time step.
        :param inputs: shape (b, c, w, h)
        :return: (t, b, c, w, h)
        """
        return repeat(inputs, 'b c w h -> t b c w h', t=self.step)

    def auto(self, inputs):
        # TODO: Calc loss for firing-rate
        shape = inputs.shape
        outputs = self.fun(inputs)
        print(outputs.shape)
        return outputs

    @torch.no_grad()
    def ttfs(self, inputs):
        """
        Time-to-First-Spike encoding: larger intensities spike earlier,
        with amplitude 1/(i+1) at step i.
        :param inputs: static data, assumed in [0, 1] — TODO confirm
        :return: encoded data of shape (step, *inputs.shape)
        """
        shape = (self.step,) + inputs.shape
        # Fix: was `self.device`, which nn.Module never defines; follow the
        # input tensor's device instead (as `rate` already did).
        outputs = torch.zeros(shape, device=inputs.device)
        for i in range(self.step):
            mask = (inputs * self.step <= (self.step - i)
                    ) & (inputs * self.step > (self.step - i - 1))
            outputs[i, mask] = 1 / (i + 1)
        return outputs

    @torch.no_grad()
    def rate(self, inputs):
        """
        Rate coding: Bernoulli spikes with probability equal to intensity.
        """
        shape = (self.step,) + inputs.shape
        return (inputs > torch.rand(shape, device=inputs.device)).float()

    @torch.no_grad()
    def phase(self, inputs):
        """
        Phase coding: emit the 8-bit binary expansion of the intensity
        with exponentially decreasing weights, repeating after 8 steps.
        :param inputs: static data, assumed in [0, 1] — TODO confirm
        :return: encoded data
        """
        shape = (self.step,) + inputs.shape
        # Fix: was `self.device` (undefined); use the input's device.
        outputs = torch.zeros(shape, device=inputs.device)
        inputs = (inputs * 256).long()
        val = 1.
        for i in range(self.step):
            if i < 8:
                mask = (inputs >> (8 - i - 1)) & 1 != 0
                outputs[i, mask] = val
                val /= 2.
            else:
                outputs[i] = outputs[i % 8]
        return outputs

    @torch.no_grad()
    def delete(self, inputs, prob):
        """
        Randomly delete spikes from encoded data (in place).
        :param inputs: encoded data
        :param prob: deletion probability threshold
        :return: data with spikes randomly deleted
        """
        # Fix: was `self.device` (undefined); use the input's device.
        # NOTE(review): randn (normal) rather than rand (uniform) is
        # compared against `prob`, so the deletion rate is not `prob` —
        # confirm intent.
        mask = (inputs >= 0) & (torch.randn_like(
            inputs, device=inputs.device) < prob)
        inputs[mask] = 0.
        return inputs

    @torch.no_grad()
    def shift(self, inputs, var):
        """
        Randomly shift spikes in time (noise augmentation).
        :param inputs: encoded data
        :param var: standard deviation of the temporal shift
        :return: shifted data
        """
        # TODO: Real-time shift
        outputs = torch.zeros_like(inputs)
        for step in range(self.step):
            shift = (var * torch.randn(1)).round_() + step
            shift.clamp_(min=0, max=self.step - 1)
            outputs[step] += inputs[int(shift)]
        return outputs
================================================
FILE: braincog/base/encoder/population_coding.py
================================================
import torch
import torch.nn as nn
import torchvision.utils
class PEncoder(nn.Module):
    """
    Population coding.

    :param step: time steps
    :param encode_type: encoder type (str): 'population_time' or
        'population_voltage'
    """
    def __init__(self, step, encode_type):
        super().__init__()
        self.step = step
        # Dispatch by name to one of the population_* methods below.
        self.fun = getattr(self, encode_type)
    def forward(self, inputs, num_popneurons, *args, **kwargs):
        # Extra args (e.g. VTH for population_voltage) pass straight through.
        outputs = self.fun(inputs, num_popneurons, *args, **kwargs)
        return outputs
    @torch.no_grad()
    def population_time(self, inputs, m):
        """
        Each feature is encoded by m Gaussian receptive-field neurons.
        The center of the i-th neuron is:
        .. math::
            \\mu_i = I_{min} + \\frac{2i-3}{2}\\frac{I_{max}-I_{min}}{m-2}
        and the width of every neuron is:
        .. math::
            \\sigma_i = \\frac{1}{1.5}\\frac{I_{max}-I_{min}}{m-2}
        where 1.5 is an empirical constant and
        I_min = min(inputs), I_max = max(inputs).
        :param inputs: (N_num, N_feature) array
        :param m: the number of gaussian neurons
        :return: (step, num_gauss_neuron)
        """
        I_min, I_max = torch.min(inputs), torch.max(inputs)
        mu = [i for i in range(0, m)]
        mu = torch.ones((1, m)) * I_min + ((2 * torch.tensor(mu) - 3) / 2) * ((I_max-I_min) / (m -2))
        sigma = (1 / 1.5) * ((I_max-I_min) / (m -2))
        shape = (self.step,m)
        # Gaussian activation of each of the m neurons for every input.
        popneurons_spike_t = torch.zeros(((m,) + inputs.shape))
        for i in range(m):
            popneurons_spike_t[i, :] = torch.exp(-(inputs - mu[0, i]) ** 2 / (2 * sigma * sigma))
        # Activation scaled to a discrete firing time in [0, step].
        spike_time = (self.step * popneurons_spike_t).type(torch.int)
        spikes = torch.zeros(shape)
        for spike_time_k in range(self.step):
            # NOTE(review): only index [0] (the neuron axis) of the 3-D
            # `where` result is used when placing spikes, so the feature
            # position is ignored — confirm this is intended.
            if torch.where(spike_time == spike_time_k)[1].numel() != 0:
                spikes[spike_time_k][torch.where(spike_time == spike_time_k)[0]] = 1
        return spikes
    @torch.no_grad()
    def population_voltage(self, inputs, m, VTH):
        '''
        The more similar the input is to a neuron's mean, the more
        sensitive that neuron is to the input; the means can be changed.
        :param inputs: (N_num, N_feature) array
        :param m: the number of the gaussian neurons
        :param VTH: threshold voltage
        Each feature is encoded by m Gaussian receptive-field neurons with
        center \\mu_i = I_min + (2i-3)/2 * (I_max-I_min)/(m-2) and width
        \\sigma_i = (1/1.5) * (I_max-I_min)/(m-2); 1.5 is empirical and
        I_min = min(inputs), I_max = max(inputs).
        :return: (step, num_gauss_neuron, dim_inputs)
        '''
        ENCODER_REGULAR_VTH = VTH
        I_min, I_max = torch.min(inputs), torch.max(inputs)
        mu = [i for i in range(0, m)]
        mu = torch.ones((1, m)) * I_min + ((2 * torch.tensor(mu) - 3) / 2) * ((I_max-I_min) / (m -2))
        sigma = (1 / 1.5) * ((I_max-I_min) / (m -2))
        popneuron_v = torch.zeros(((m,) + inputs.shape))
        delta_v = torch.zeros(((m,) + inputs.shape))
        # Per-step membrane increment from each neuron's Gaussian response.
        for i in range(m):
            delta_v[i] = torch.exp(-(inputs - mu[0, i]) ** 2 / (2 * sigma * sigma))
        spikes = torch.zeros((self.step,) + ((m,) + inputs.shape))
        for spike_time_k in range(self.step):
            # Integrate, fire where the threshold is reached, soft-reset.
            popneuron_v = popneuron_v + delta_v
            spikes[spike_time_k][torch.where(popneuron_v.ge(ENCODER_REGULAR_VTH))] = 1
            popneuron_v = popneuron_v - spikes[spike_time_k] * ENCODER_REGULAR_VTH
        popneuron_rate = torch.sum(spikes, dim=0)/self.step
        return spikes, popneuron_rate
## test
# if __name__ == '__main__':
# a = (torch.rand((2,4))*10).type(torch.int)
# print(a)
# pencoder = PEncoder(10, 'population_time')
# spikes=pencoder(inputs=a, num_popneurons=3)
# print(spikes, spikes.shape)
# pencoder = PEncoder(10, 'population_voltage')
# spikes, popneuron_rate = pencoder(inputs=a, num_popneurons=5, VTH=0.99)
# print(spikes, spikes.shape)
================================================
FILE: braincog/base/encoder/qs_coding.py
================================================
from signal import signal
from subprocess import call
import numpy as np
import random
import copy
class QSEncoder:
    """
    QS Encoding.

    :param lambda_max: maximum firing rate
    :param steps: spike emission period length T
    :param sig_len: spike emission window length
    :param shift: whether the background is inverted
    :param noise: whether to add noise
    :param noise_rate: fraction of pixels to flip as noise
    :param eps: small constant guarding against division overflow
    """

    def __init__(self,
                 lambda_max,
                 steps,
                 sig_len,
                 shift=False,
                 noise=None,
                 noise_rate=None,
                 eps=1e-6
                 ) -> None:
        self._lambda_max = lambda_max
        self._steps = steps
        self._sig_len = sig_len
        self._shift = shift
        self._noise = noise
        self._noise_rate = noise_rate
        self._eps = eps

    def __call__(self, image, image_delta, image_ori, image_ori_delta):
        """Convert an image to spike trains.

        :param image: background-inverted image
        :param image_delta: perturbed image, used to compute the phase
        :param image_ori: original image
        :param image_ori_delta: original perturbed image
        :return: spike trains of shape (num_pixels, steps)
        """
        if self._noise:
            signals = self.noise_trans(image, image_ori, image_ori_delta)
        elif self._shift:
            signals = self.shift_trans(image, image_delta, image_ori, image_ori_delta)
        else:
            # Fix: was `self.steps` (no such attribute — only `_steps`).
            signals = np.zeros((self._steps, image.shape[0]))
            signal_possion = np.random.poisson(image, (self._sig_len, image.shape[0]))
            signals[:self._sig_len] = signal_possion[:]
        # Fix: previously returned `signal.T` — the stdlib `signal`
        # function imported at the top of the file — instead of the local
        # `signals` array.
        return signals.T

    def shift_trans(self, image, image_delta, image_ori, image_ori_delta):
        """
        Convert a background-inverted image to spike trains.

        :param image: background-inverted image
        :param image_delta: perturbed image, used to compute the phase
        :param image_ori: original image
        :param image_ori_delta: original perturbed image
        :return: spike trains of shape (steps, num_pixels)
        """
        signal = np.zeros((self._steps, image.shape[0]))
        assert image_ori is not None
        # Fix: was `assert self.noise is False` — wrong attribute name,
        # and the default is None, not False.
        assert not self._noise
        assert image_delta is not None
        assert image_ori_delta is not None
        image_ori_reverse = self._lambda_max - image_ori
        image_ori_delta_reverse = self._lambda_max - image_ori_delta
        zeta = image / (image_ori**2 + image_ori_reverse**2) ** 0.5
        zeta_delta = image_delta / (image_ori_delta**2 + image_ori_delta_reverse**2)**0.5
        # The perturbation decides which arcsin branch the phase lies on.
        idx_left = zeta < zeta_delta
        phi = np.arctan(image_ori / (image_ori_reverse + self._eps))
        zeta = np.clip(zeta, -1, 1)
        zeta = np.arcsin(zeta)
        theta1 = zeta - phi
        theta2 = np.pi - zeta - phi
        theta = np.zeros(theta1.shape)
        theta[idx_left] = theta1[idx_left]
        theta[~idx_left] = theta2[~idx_left]
        theta = np.mean(theta)
        cos_theta = np.cos(theta)
        sin_theta = np.sin(theta)
        spike_rate = np.abs((self._lambda_max * sin_theta - image) / (sin_theta - cos_theta + self._eps))
        signal_possion = np.random.poisson(spike_rate, (self._sig_len, spike_rate.shape[0]))
        # Phase theta (in [0, pi/2]) shifts the emission window in time.
        shift_step = np.rint(np.clip(2 * theta / np.pi, a_min=0, a_max=1.0) * (self._steps - self._sig_len))
        # Fix: np.int was removed in NumPy 1.24; use the builtin int.
        shift_step = shift_step.astype(int)
        signal[shift_step:shift_step + self._sig_len] = signal_possion[:]
        # Fix: the return statement was missing, so callers got None.
        return signal

    def noise_trans(self, image, image_ori, image_ori_delta):
        """
        Convert a noisy image to spike trains.

        :param image: background-inverted image
        :param image_ori: original image
        :param image_ori_delta: original perturbed image
        :return: spike trains of shape (steps, num_pixels)
        """
        signal = np.zeros((self._steps, image.shape[0]))
        assert image_ori is not None
        assert self._shift is False
        assert self._noise_rate is not None
        # Build a tiny perturbation of the original image on the fly.
        image_ori_delta = copy.deepcopy(image_ori)
        idx = image_ori_delta < (self._lambda_max - 0.001)
        image_ori_delta[idx] += 0.001
        image_ori_reverse = self._lambda_max - image_ori
        image_ori_delta_reverse = self._lambda_max - image_ori_delta
        image_noise, image_delta_noise = self.reverse_pixels(image_ori, image_ori_delta, noise_rate=self._noise_rate)
        zeta = image_noise / (image_ori**2 + image_ori_reverse**2)**0.5
        zeta_delta = image_delta_noise / (image_ori_delta**2 + image_ori_delta_reverse**2)**0.5
        idx_left = zeta < zeta_delta
        phi = np.arctan(image_ori / (image_ori_reverse + self._eps))
        zeta = np.clip(zeta, -1, 1)
        zeta = np.arcsin(zeta)
        theta1 = zeta - phi
        theta2 = np.pi - zeta - phi
        theta = np.zeros(theta1.shape)
        theta[idx_left] = theta1[idx_left]
        theta[~idx_left] = theta2[~idx_left]
        theta = np.mean(theta)
        cos_theta = np.cos(theta)
        sin_theta = np.sin(theta)
        spike_rate = np.abs((self._lambda_max * sin_theta - image_noise) / (sin_theta - cos_theta + self._eps))
        signal_possion = np.random.poisson(spike_rate, (self._sig_len, spike_rate.shape[0]))
        shift_step = np.rint(np.clip(2 * theta / np.pi, a_min=0, a_max=1.0) * (self._steps - self._sig_len))
        # Fix: np.int was removed in NumPy 1.24; use the builtin int.
        shift_step = shift_step.astype(int)
        signal[shift_step:shift_step + self._sig_len] = signal_possion[:]
        return signal

    def reverse_pixels(self, image, image_delta, noise_rate, flip_bits=None):
        """
        Invert a random subset of pixels (value -> lambda_max - value).

        :param flip_bits: explicit indices to flip; sampled from
            `noise_rate` when None.
        :return: (flipped image, flipped delta image)
        """
        if flip_bits is None:
            N = int(noise_rate * image.shape[0])
            flip_bits = random.sample(range(image.shape[0]), N)
        img = copy.copy(image)
        img_delta = copy.copy(image_delta)
        img[flip_bits] = self._lambda_max - img[flip_bits]
        img_delta[flip_bits] = self._lambda_max - img_delta[flip_bits]
        return img, img_delta
================================================
FILE: braincog/base/learningrule/BCM.py
================================================
import numpy as np
import torch
import os
import sys
from torch import nn
from torch.nn import Parameter
import abc
import math
from abc import ABC
import numpy as np
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from braincog.base.node import *
class BCM(nn.Module):
    """
    BCM learning rule; several groups of neurons feed into this node.
    """
    def __init__(self, node, connection, cfunc=None, weightdecay=0.99, tau=10):
        """
        :param node: neuron instance, e.g. IFNode or LIFNode
        :param connection: connection instance or list of instances; each
            may contain only one operation
        :param cfunc: BCM rate function, default y(y - th).
            NOTE(review): this argument is currently ignored — the method
            self.cfunc is always used; confirm intent.
        :param weightdecay: weight decay coefficient, default 0.99
        :param tau: time constant of the sliding rate threshold
        """
        super().__init__()
        self.node = node
        self.connection = connection
        if not isinstance(connection, list):
            self.connection = [self.connection]
        self.weightdecay = weightdecay
        self.tau = tau
        self.threshold = 0
    def forward(self, *x):
        """
        One forward step.
        :return: s — output spikes; dw — weight update per connection
        """
        i = 0
        x = [xi.clone().detach() for xi in x]
        # Sum the synaptic currents from every input group.
        for xi, coni in zip(x, self.connection):
            i += coni(xi)
        with torch.no_grad():
            s = self.node(i)
            # Replace the current's *values* with the BCM rate term
            # y(y - th) in place, keeping the autograd graph intact so
            # grad() below yields the BCM outer-product update.
            i.data += self.cfunc(s) - i.data
        # (The comprehension variable `i` shadows the tensor `i` only
        # inside the listcomp scope; grad_outputs still sees the tensor.)
        dw = torch.autograd.grad(outputs=i, inputs=[i.weight for i in self.connection], grad_outputs=i)
        # Apply weight decay in place to each returned update.
        for dwi, i in zip(dw, self.connection):
            dwi -= (1 - self.weightdecay) * i.weight
        return s, dw
    def cfunc(self, s):
        # Sliding threshold: low-pass filter of the postsynaptic activity.
        self.threshold = ((self.tau - 1) * self.threshold + s) / self.tau
        return (s * (s - self.threshold)).detach()
    def reset(self):
        """
        Reset the sliding threshold.
        """
        self.threshold = 0
        pass
================================================
FILE: braincog/base/learningrule/Hebb.py
================================================
import numpy as np
import torch
import os
import sys
from torch import nn
from torch.nn import Parameter
import abc
import math
from abc import ABC
import numpy as np
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from braincog.base.node.node import *
class Hebb(nn.Module):
"""
Hebb learning rule 多组神经元输入到该节点
"""
def __init__(self, node, connection):
"""
:param node:node神经元类型实例如IFNode LIFNode
:param connection:连接 类的实例列表 里面只能有一个操作
"""
super().__init__()
self.node = node
self.connection = connection
self.trace = [None for i in self.connection]
def forward(self, *x):
"""
计算前向传播过程
:return:s是脉冲 dw更新量
"""
i = 0
x = [xi.clone().detach() for xi in x]
for xi, coni in zip(x, self.connection):
i += coni(xi)
with torch.no_grad():
s = self.node(i)
i.data += s - i.data
dw = torch.autograd.grad(outputs=i, inputs=[i.weight for i in self.connection], grad_outputs=i)
return s, dw
def reset(self):
"""
重置
"""
self.trace = [None for i in self.connection]
if __name__ == "__main__":
    # Smoke test: two 2x2 linear projections with unit weights feeding an
    # IF neuron; prints the Hebbian weight updates for 10 steps.
    node = IFNode()
    linear1 = nn.Linear(2, 2, bias=False)
    linear2 = nn.Linear(2, 2, bias=False)
    linear1.weight.data = torch.tensor([[1., 1], [1, 1]], requires_grad=True)
    linear2.weight.data = torch.tensor([[1., 1], [1, 1]], requires_grad=True)
    hebb = Hebb(node, [linear1, linear2])
    for i in range(10):
        x, dw1 = hebb(torch.tensor([1.1, 1.1]), torch.tensor([1.1, 1.1]))
        print(dw1)
================================================
FILE: braincog/base/learningrule/RSTDP.py
================================================
import numpy as np
import torch
import os
import sys
from torch import nn
from torch.nn import Parameter
import abc
import math
from abc import ABC
import numpy as np
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from braincog.base.node import *
class RSTDP(nn.Module):
    """
    Reward-modulated STDP (R-STDP).
    """
    def __init__(self, node, connection, decay=0.99, reward_decay=0.5):
        """
        :param node: neuron instance, e.g. IFNode or LIFNode
        :param connection: connection instance or list of instances; each
            may contain only one operation
        :param decay: trace decay factor
        :param reward_decay: reward trace decay factor.
            NOTE(review): stored but never used outside __init__ — confirm.
        """
        super().__init__()
        self.node = node
        self.connection = connection
        if not isinstance(connection, list):
            self.connection = [self.connection]
        self.trace = [None for i in self.connection]
        self.decay = decay
        self.reward_decay = reward_decay
        # NOTE(review): STDP's second argument is a connection, but the
        # node is passed here — likely should be self.connection; confirm.
        self.stdp = STDP(self.node, self.node, self.decay)
    def forward(self, *x, r):
        """
        One forward step.
        :param x: inputs forwarded to the underlying STDP rule
        :param r: reward signal(s)
        :return: s — spikes; dw * trace — reward-modulated weight update
        """
        # NOTE(review): STDP.forward expects a single tensor but receives
        # the packed tuple `x`, and `dw` is a tuple so `dw * trace` repeats
        # it rather than scaling — this path looks unfinished; confirm.
        s, dw = self.stdp(x)
        trace = self.cal_trace(r)
        return s, dw * trace
    def cal_trace(self, x):
        """
        Update and return the reward eligibility traces:
        trace = trace * decay + x (initialized from the first reward).
        """
        for i in range(len(x)):
            if self.trace[i] is None:
                self.trace[i] = Parameter(x[i].clone().detach(), requires_grad=False)
            else:
                self.trace[i] *= self.decay
                self.trace[i] += x[i].detach()
        return self.trace
    def reset(self):
        # Clear the eligibility traces between episodes.
        self.trace = [None for i in self.connection]
================================================
FILE: braincog/base/learningrule/STDP.py
================================================
import numpy as np
import torch
import os
import sys
from torch import nn
from torch.nn import Parameter
import abc
import math
from abc import ABC
import numpy as np
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from braincog.base.node.node import *
class STDP(nn.Module):
    """
    STDP learning rule for a single connection.

    The weight update is produced through an autograd trick: the synaptic
    current keeps its graph while its *values* are replaced in place (via
    .data) by the output spikes, and the input's values by its trace, so
    torch.autograd.grad yields the trace-weighted outer product.
    """
    def __init__(self, node, connection, decay=0.99):
        """
        :param node: neuron instance, e.g. IFNode or LIFNode
        :param connection: connection instance; may contain only one op
        :param decay: trace decay factor
        """
        super().__init__()
        self.node = node
        self.connection = connection
        self.trace = None
        self.decay = decay
    def forward(self, x):
        """
        One forward step.
        :return: s — output spikes; dw — weight update
        """
        x = x.clone().detach()
        i = self.connection(x)
        with torch.no_grad():
            s = self.node(i)
            # Swap the current's values for the spikes (graph unchanged).
            i.data += s - i.data
        trace = self.cal_trace(x)
        # Swap the (leaf) input's values for its trace.
        x.data += trace - x.data
        dw = torch.autograd.grad(outputs=i, inputs=self.connection.weight, grad_outputs=i)
        return s, dw
    def cal_trace(self, x):
        """
        Update the presynaptic trace: trace = trace * decay + x
        (initialized from the first input).
        """
        if self.trace is None:
            self.trace = Parameter(x.clone().detach(), requires_grad=False)
        else:
            self.trace *= self.decay
            self.trace += x
        return self.trace.detach()
    def reset(self):
        """
        Clear the trace.
        """
        self.trace = None
class MutliInputSTDP(nn.Module):
    """
    STDP learning rule for multiple input groups feeding this node.
    (Name kept as-is; "Mutli" is a historical typo for "Multi".)
    """
    def __init__(self, node, connection, decay=0.99):
        """
        :param node: neuron instance, e.g. IFNode or LIFNode
        :param connection: list of connection instances; each may contain
            only one operation
        :param decay: trace decay factor
        """
        super().__init__()
        self.node = node
        self.connection = connection
        self.trace = [None for i in self.connection]
        self.decay = decay
    def forward(self, *x):
        """
        One forward step.
        :return: s — output spikes; dw — weight update per connection
        """
        i = 0
        x = [xi.clone().detach() for xi in x]
        # Sum the synaptic currents from every input group.
        for xi, coni in zip(x, self.connection):
            i += coni(xi)
        with torch.no_grad():
            s = self.node(i)
            # Swap the summed current's values for the spikes (graph kept).
            i.data += s - i.data
        trace = self.cal_trace(x)
        # Swap each (leaf) input's values for its trace.
        for xi, ti in zip(x, trace):
            xi.data += ti - xi.data
        # (Comprehension `i` shadows the tensor only inside the listcomp.)
        dw = torch.autograd.grad(outputs=i, inputs=[i.weight for i in self.connection], grad_outputs=i)
        return s, dw
    def cal_trace(self, x):
        """
        Update the presynaptic traces: trace = trace * decay + x
        (each initialized from its first input).
        """
        for i in range(len(x)):
            if self.trace[i] is None:
                self.trace[i] = Parameter(x[i].clone().detach(), requires_grad=False)
            else:
                self.trace[i] *= self.decay
                self.trace[i] += x[i].detach()
        return self.trace
    def reset(self):
        """
        Clear the traces.
        """
        self.trace = [None for i in self.connection]
class LTP(MutliInputSTDP):
    """
    Long-term potentiation: identical to MutliInputSTDP, kept as a named
    alias for the potentiation-only use of the rule.
    """
    pass
class LTD(nn.Module):
    """
    Long-term depression arm of STDP for multiple input groups feeding
    this node: each input is paired with the *previous* postsynaptic
    trace rather than the current spikes.
    """
    def __init__(self, node, connection, decay=0.99):
        """
        :param node: neuron instance, e.g. IFNode or LIFNode
        :param connection: list of connection instances; each may contain
            only one operation
        :param decay: postsynaptic trace decay factor
        """
        super().__init__()
        self.node = node
        self.connection = connection
        self.trace = None
        self.decay = decay
    def forward(self, *x):
        """
        One forward step.
        :return: s — output spikes; dw — weight update per connection
        """
        i = 0
        x = [xi.clone().detach() for xi in x]
        # Sum the synaptic currents from every input group.
        for xi, coni in zip(x, self.connection):
            i += coni(xi)
        with torch.no_grad():
            s = self.node(i)
            # Use the trace of *previous* outputs as the error signal.
            trace = self.cal_trace(s)
            i.data += trace - i.data
        dw = torch.autograd.grad(outputs=i, inputs=[i.weight for i in self.connection], grad_outputs=i)
        return s, dw
    def cal_trace(self, x):
        """
        Return the decayed trace *before* adding the current spikes:
        returned = decay * old_trace; stored = returned + x.
        """
        if self.trace is None:
            self.trace = Parameter(torch.zeros_like(x), requires_grad=False)
        else:
            self.trace *= self.decay
        trace = self.trace.clone().detach()
        self.trace += x
        return trace
    def reset(self):
        """
        Clear the trace.
        """
        self.trace = None
class FullSTDP(nn.Module):
    """
    Full STDP rule for multiple input groups feeding this node: returns
    both the potentiation (input-trace) and depression (output-trace)
    weight updates.
    """

    def __init__(self, node, connection, decay=0.99, decay2=0.99):
        """
        :param node: neuron instance, e.g. IFNode or LIFNode
        :param connection: list of connection instances; each may contain
            only one operation
        :param decay: decay of the presynaptic (input) traces
        :param decay2: decay of the postsynaptic (output) trace
        """
        super().__init__()
        self.node = node
        self.connection = connection
        self.tracein = [None for i in self.connection]
        self.traceout = None
        self.decay = decay
        self.decay2 = decay2

    def forward(self, *x):
        """
        One forward step.
        :return: s — spikes; dw2 — potentiation update; dw1 — depression
            update
        """
        i = 0
        x = [xi.clone().detach() for xi in x]
        # Sum the synaptic currents from every input group.
        for xi, coni in zip(x, self.connection):
            i += coni(xi)
        with torch.no_grad():
            s = self.node(i)
            # Depression part: pair the *previous* output trace with the
            # raw inputs (value swap via .data, autograd graph kept).
            traceout = self.cal_traceout(s)
            i.data += traceout - i.data
        dw1 = torch.autograd.grad(outputs=i, inputs=[c.weight for c in self.connection], retain_graph=True,
                                  grad_outputs=i)
        with torch.no_grad():
            # Potentiation part: pair the output spikes with the input
            # traces.
            i.data += s - i.data
            tracein = self.cal_tracein(x)
            for xi, ti in zip(x, tracein):
                xi.data += ti - xi.data
        dw2 = torch.autograd.grad(outputs=i, inputs=[c.weight for c in self.connection], grad_outputs=i)
        return s, dw2, dw1

    def cal_tracein(self, x):
        """
        Update the presynaptic traces: trace = trace * decay + x
        (each initialized from its first input).
        """
        for i in range(len(x)):
            if self.tracein[i] is None:
                self.tracein[i] = Parameter(x[i].clone().detach(), requires_grad=False)
            else:
                self.tracein[i] *= self.decay
                self.tracein[i] += x[i].detach()
        return self.tracein

    def cal_traceout(self, x):
        """
        Return the decayed output trace *before* adding current spikes:
        returned = decay2 * old_trace; stored = returned + x.
        """
        if self.traceout is None:
            self.traceout = Parameter(torch.zeros_like(x), requires_grad=False)
        else:
            self.traceout *= self.decay2
        trace = self.traceout.clone().detach()
        self.traceout += x
        return trace

    def reset(self):
        """
        Clear all traces.
        """
        # Fix: the two assignments were swapped (tracein got None and
        # traceout got the list), inverting the constructor's invariant
        # and corrupting state after the first reset.
        self.tracein = [None for i in self.connection]
        self.traceout = None
if __name__ == "__main__":
    # Smoke test: drive an LTD rule built from two 2x2 linear connections
    # with constant inputs for 10 steps and print the weight updates.
    node = IFNode()
    linear1 = nn.Linear(2, 2, bias=False)
    linear2 = nn.Linear(2, 2, bias=False)
    linear1.weight.data = torch.tensor([[1., 1], [1, 1]], requires_grad=True)
    linear2.weight.data = torch.tensor([[1., 1], [1, 1]], requires_grad=True)
    stdp = LTD(node, [linear1, linear2])
    for i in range(10):
        x, dw1 = stdp(torch.tensor([1.1, 1.1]), torch.tensor([1.1, 1.1]))
        print(dw1)
================================================
FILE: braincog/base/learningrule/STP.py
================================================
import math
class short_time():
    """
    Short-term synaptic plasticity, following Tsodyks & Markram (1997).

    NOTE(review): throughout this class the parameter named ``np`` is a
    neuron-parameter record (a struct-like object), NOT numpy.

    :param SizeHistOutput: length of the circular spike-history buffers
        (``uprev`` / ``Rprev``) kept on each synapse record
    """
    def __init__(self, SizeHistOutput):
        super().__init__()
        self.SizeHistOutput = SizeHistOutput
    def syndepr(self, Syn=None, ISI=None, Nsp=None):
        """
        Short-term plasticity update for one pre-synaptic spike.

        :param Syn: synapse record (fields: uprev, Rprev, use, tc_fac, tc_rec)
        :param ISI: inter-spike interval since the previous pre-synaptic spike
        :param Nsp: index of the current spike in the circular history buffer
        :return: effective synaptic efficacy ``R * u`` for this spike
        """
        SizeHistOutput = self.SizeHistOutput
        # Facilitation u decays with tc_fac; recovery R relaxes to 1 with tc_rec.
        qu = Syn.uprev[Nsp] * math.exp(-ISI / Syn.tc_fac)
        qR = math.exp(-ISI / Syn.tc_rec)
        u = qu + Syn.use * (1.0 - qu)
        R = Syn.Rprev[Nsp] * (1.0 - Syn.uprev[Nsp]) * qR + 1.0 - qR
        # Store the new state in the next slot of the circular buffer.
        Syn.uprev[(Nsp + 1) % SizeHistOutput] = u
        Syn.Rprev[(Nsp + 1) % SizeHistOutput] = R
        return R * u
    def set_gsyn(self, np=None, dt=None, v=None, NoiseSyn=None):
        """
        Accumulate synaptic conductances and the total input current.

        :param np: neuron parameter record (STList, gfON/OFF buffers, Iinj)
        :param dt: time elapsed since the conductance buffers were updated
        :param v: membrane state; ``v[0]`` is the membrane potential
        :param NoiseSyn: record of background-noise synapses
        :return: (gsyn_AN, I_tot, gsyn_G)
        """
        Isyn = 0
        gsyn_AN = 0
        gsyn_G = 0
        # Regular synapses.
        for j in range(np.NumSynType):
            syn = np.STList[j]
            sgate = 1.0
            # Voltage-dependent magnesium gate (NMDA-style block).
            if (syn.Mg_gate > 0.0):
                sgate = syn.Mg_gate / (1.0 + syn.Mg_fac * math.exp(syn.Mg_slope * (syn.Mg_half - v[0])))
            Isyn += sgate * (
                np.gfOFFsyn[j] * math.exp(-dt / syn.tc_off) - np.gfONsyn[j] * math.exp(-dt / syn.tc_on)) * (
                syn.Erev - v[0])
            # Erev == 0 marks excitatory (AN) conductance, else inhibitory (G).
            if (syn.Erev == 0.0):
                gsyn_AN = gsyn_AN + sgate * (
                    np.gfOFFsyn[j] * math.exp(-dt / syn.tc_off) - np.gfONsyn[j] * math.exp(-dt / syn.tc_on))
            else:
                gsyn_G = gsyn_G + sgate * (
                    np.gfOFFsyn[j] * math.exp(-dt / syn.tc_off) - np.gfONsyn[j] * math.exp(-dt / syn.tc_on))
        # Background-noise synapses.  NOTE(review): this loop gates on `v`
        # directly while the one above uses `v[0]` — presumably `v` is scalar
        # on this code path; confirm against the caller.
        for j in range(NoiseSyn.NumSyn):
            syn = NoiseSyn.Syn[j].STPtr
            sgate = 1.0
            if (syn.Mg_gate > 0.0):
                sgate = syn.Mg_gate / (1.0 + syn.Mg_fac * math.exp(syn.Mg_slope * (syn.Mg_half - v)))
            Isyn += sgate * (
                np.gfOFFnoise[j] * math.exp(-dt / syn.tc_off) - np.gfONnoise[j] * math.exp(-dt / syn.tc_on)) * (
                syn.Erev - v)
            if (syn.Erev == 0.0):
                gsyn_AN = gsyn_AN + sgate * (
                    np.gfOFFnoise[j] * math.exp(-dt / syn.tc_off) - np.gfONnoise[j] * math.exp(-dt / syn.tc_on))
            else:
                gsyn_G = gsyn_G + sgate * (
                    np.gfOFFnoise[j] * math.exp(-dt / syn.tc_off) - np.gfONnoise[j] * math.exp(-dt / syn.tc_on))
        I_tot = Isyn + np.Iinj
        return gsyn_AN, I_tot, gsyn_G
    def IDderiv(self, np=None, v=None, dt=None, dv=None, NoiseSyn=None, flag_dv=None):
        """
        Right-hand side of the single-neuron ODE system.

        :param np: neuron parameter record
        :param v: state vector [membrane potential, adaptation variable]
        :param dt: time elapsed since the conductance buffers were updated
        :param dv: output buffer for the derivatives (written in place)
        :param NoiseSyn: background-noise synapse record
        :param flag_dv: when 0, enables the depolarisation-block branch
        :return: (wV, D0, gsyn_AN, gsyn_G, I_tot, dv)
        """
        Isyn = 0
        gsyn_G = 0
        gsyn_AN = 0
        # Regular synapses (same accumulation as set_gsyn).
        for j in range(np.NumSynType):
            syn = np.STList[j]
            sgate = 1.0
            if (syn.Mg_gate > 0.0):
                sgate = syn.Mg_gate / (1.0 + syn.Mg_fac * math.exp(syn.Mg_slope * (syn.Mg_half - v[0])))
            Isyn += sgate * (
                np.gfOFFsyn[j] * math.exp(-dt / syn.tc_off) - np.gfONsyn[j] * math.exp(-dt / syn.tc_on)) * (
                syn.Erev - v[0])
            if (syn.Erev == 0.0):
                gsyn_AN = gsyn_AN + sgate * (
                    np.gfOFFsyn[j] * math.exp(-dt / syn.tc_off) - np.gfONsyn[j] * math.exp(-dt / syn.tc_on))
            else:
                gsyn_G = gsyn_G + sgate * (
                    np.gfOFFsyn[j] * math.exp(-dt / syn.tc_off) - np.gfONsyn[j] * math.exp(-dt / syn.tc_on))
        # Background-noise synapses.
        for j in range(NoiseSyn.NumSyn):
            syn = NoiseSyn.Syn[j].STPtr
            sgate = 1.0
            if (syn.Mg_gate > 0.0):
                sgate = syn.Mg_gate / (1.0 + syn.Mg_fac * math.exp(syn.Mg_slope * (syn.Mg_half - v[0])))
            Isyn += sgate * (
                np.gfOFFnoise[j] * math.exp(-dt / syn.tc_off) - np.gfONnoise[j] * math.exp(-dt / syn.tc_on)) * (
                syn.Erev - v[0])
            if (syn.Erev == 0.0):
                gsyn_AN = gsyn_AN + sgate * (
                    np.gfOFFnoise[j] * math.exp(-dt / syn.tc_off) - np.gfONnoise[j] * math.exp(-dt / syn.tc_on))
            else:
                gsyn_G = gsyn_G + sgate * (
                    np.gfOFFnoise[j] * math.exp(-dt / syn.tc_off) - np.gfONnoise[j] * math.exp(-dt / syn.tc_on))
        # Exponential spike-initiation current (exponential-IF term).
        I_ex = np.gL * np.sf * math.exp((v[0] - np.Vth) / np.sf)
        wV = np.Iinj + Isyn - np.gL * (v[0] - np.EL) + I_ex
        D0 = (np.Cm / np.gL) * wV
        if ((
                np.Iinj + Isyn) >= np.I_ref and flag_dv == 0):
            # Depolarisation-block regime: relax towards v_dep.
            dv[0] = -(np.gL / np.Cm) * (v[0] - np.v_dep)
            flag_regime_osc = 0  # NOTE(review): set but never used
        else:
            dv[0] = (np.Iinj - np.gL * (v[0] - np.EL) - v[1] + I_ex + Isyn) / np.Cm
            flag_regime_osc = 1
        dD0 = np.Cm * (math.exp((v[0] - np.Vth) / np.sf) - 1)
        # Adaptation variable only evolves inside the oscillation band.
        if ((v[1] > wV - D0 / np.tcw) and (v[1] < wV + D0 / np.tcw) and v[0] <= np.Vth and (
                np.Iinj + Isyn) < np.I_ref):
            dv[1] = -(np.gL * (1 - math.exp((v[0] - np.Vth) / np.sf)) + dD0 / np.tcw) * dv[0]
        else:
            dv[1] = 0
        I_tot = Isyn + np.Iinj
        return wV, D0, gsyn_AN, gsyn_G, I_tot, dv
    def update(self, np=None, dt=None, NoiseSyn=None, flag_dv=None):
        """
        Integrate the neuron ODEs with an explicit second-order Runge-Kutta
        (Heun) step of size ``dt``.

        :param np: neuron parameter record; its state ``v``/``dv`` is updated
            in place
        :param dt: time step
        :return: (np, gsyn_AN, gsyn_G, I_tot)
        """
        nvar = 2
        v = [0] * 2
        dv1 = [0] * 2
        dv2 = [0] * 2
        for i in range(nvar):
            v[i] = np.v[i]
        # NOTE(review): IDderiv is called with dt=0.0 in both stages, i.e.
        # conductances are evaluated at the buffer time — confirm intended.
        wV, D0, gsyn_AN, gsyn_G, I_tot, dv1 = short_time(self.SizeHistOutput).IDderiv(np, v, 0.0, dv1, NoiseSyn, flag_dv)
        for i in range(nvar):
            v[i] += dt * dv1[i]
        wV, D0, gsyn_AN, gsyn_G, I_tot, dv2 = short_time(self.SizeHistOutput).IDderiv(np, v, 0.0, dv2, NoiseSyn, flag_dv)
        for i in range(nvar):
            np.v[i] += dt / 2.0 * (dv1[i] + dv2[i])
            np.dv[i] = dt / 2.0 * (dv1[i] + dv2[i])
        # Clamp the adaptation variable onto the oscillation boundary.
        if ((np.v[1] > wV - D0 / np.tcw) and (np.v[1] < wV + D0 / np.tcw) and np.v[0] <= np.Vth):
            np.v[1] = wV - (D0 / np.tcw)
        return np, gsyn_AN, gsyn_G, I_tot
================================================
FILE: braincog/base/learningrule/__init__.py
================================================
from .BCM import BCM
from .Hebb import Hebb
from .RSTDP import RSTDP
from .STDP import STDP, MutliInputSTDP, LTP, LTD, FullSTDP
from .STP import short_time
__all__ = [
'BCM',
"Hebb",
'RSTDP',
'STDP', 'MutliInputSTDP', 'LTP', 'LTD', 'FullSTDP',
'short_time'
]
================================================
FILE: braincog/base/node/__init__.py
================================================
from .node import *
================================================
FILE: braincog/base/node/node.py
================================================
# encoding: utf-8
# Author : Floyed<Floyed_Shen@outlook.com>
# Datetime : 2022/4/10 18:46
# User : Floyed
# Product : PyCharm
# Project : braincog
# File : node.py
# explain : 神经元节点类型
import abc
import math
from abc import ABC
import numpy as np
import random
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from einops import rearrange, repeat
from braincog.base.connection.layer import CustomLinear
from braincog.base.strategy.surrogate import *
class BaseNode(nn.Module, abc.ABC):
    """
    Base class for neuron models.

    :param threshold: membrane-potential threshold for emitting a spike
    :param v_reset: resting potential
    :param dt: simulation time step
    :param step: number of simulation steps
    :param requires_thres_grad: whether to compute gradients w.r.t. the threshold, default ``False``
    :param sigmoid_thres: whether to squash the threshold into [0, 1] with a sigmoid, default ``False``
    :param requires_fp: whether to keep feature maps during inference (extra memory/time), default ``False``
    :param layer_by_layer: whether to compute all steps at once; usually faster for large models, default ``False``
    :param n_groups: number of weight groups across time steps, default ``1`` (no grouping)
    :param mem_detach: whether to detach the previous membrane potential from the graph
    :param args: extra positional args
    :param kwargs: extra keyword args
    """
    def __init__(self,
                 threshold=.5,
                 v_reset=0.,
                 dt=1.,
                 step=8,
                 requires_thres_grad=False,
                 sigmoid_thres=False,
                 requires_fp=False,
                 layer_by_layer=False,
                 n_groups=1,
                 *args,
                 **kwargs):
        super(BaseNode, self).__init__()
        self.threshold = Parameter(torch.tensor(threshold), requires_grad=requires_thres_grad)
        self.sigmoid_thres = sigmoid_thres
        self.mem = 0.
        self.spike = 0.
        self.dt = dt
        self.feature_map = []
        self.mem_collect = []
        self.requires_fp = requires_fp
        self.v_reset = v_reset
        self.step = step
        self.layer_by_layer = layer_by_layer
        self.groups = n_groups
        self.mem_detach = kwargs['mem_detach'] if 'mem_detach' in kwargs else False
        self.requires_mem = kwargs['requires_mem'] if 'requires_mem' in kwargs else False
    @abc.abstractmethod
    def calc_spike(self):
        """
        Compute spike output from the current membrane potential and reset it.

        :return: None
        """
        pass
    def integral(self, inputs):
        """
        Accumulate the current inputs into the membrane potential.

        :param inputs: current synaptic input
        :type inputs: torch.tensor
        :return: None
        """
        pass
    def get_thres(self):
        # Optionally squash the threshold into [0, 1] with a sigmoid.
        return self.threshold if not self.sigmoid_thres else self.threshold.sigmoid()
    def rearrange2node(self, inputs):
        # Reshape op-layout tensors into a (step, batch, ...) node layout.
        if self.groups != 1:
            if len(inputs.shape) == 4:
                outputs = rearrange(inputs, 'b (c t) w h -> t b c w h', t=self.step)
            elif len(inputs.shape) == 2:
                outputs = rearrange(inputs, 'b (c t) -> t b c', t=self.step)
            else:
                raise NotImplementedError
        elif self.layer_by_layer:
            if len(inputs.shape) == 4:
                outputs = rearrange(inputs, '(t b) c w h -> t b c w h', t=self.step)
            elif len(inputs.shape) == 3:
                outputs = rearrange(inputs, '(t b) n c -> t b n c', t=self.step)
            elif len(inputs.shape) == 2:
                outputs = rearrange(inputs, '(t b) c -> t b c', t=self.step)
            else:
                raise NotImplementedError
        else:
            outputs = inputs
        return outputs
    def rearrange2op(self, inputs):
        # Inverse of rearrange2node: back to the op layout.
        if self.groups != 1:
            if len(inputs.shape) == 5:
                outputs = rearrange(inputs, 't b c w h -> b (c t) w h')
            elif len(inputs.shape) == 3:
                outputs = rearrange(inputs, ' t b c -> b (c t)')
            else:
                raise NotImplementedError
        elif self.layer_by_layer:
            if len(inputs.shape) == 5:
                outputs = rearrange(inputs, 't b c w h -> (t b) c w h')
            elif len(inputs.shape) == 4:
                outputs = rearrange(inputs, ' t b n c -> (t b) n c')
            elif len(inputs.shape) == 3:
                outputs = rearrange(inputs, ' t b c -> (t b) c')
            else:
                raise NotImplementedError
        else:
            outputs = inputs
        return outputs
    def forward(self, inputs):
        """
        Default nn.Module entry point: integrate the inputs, emit spikes.

        With ``self.requires_fp is True``, ``self.feature_map`` records the
        spike trace.

        :param inputs: current input membrane current
        :return: output spikes
        """
        if hasattr(self, 'parallel') and self.parallel is True:
            # Parallel nodes (PSU family) process all steps in one shot.
            inputs = self.rearrange2node(inputs)
            if self.mem_detach and hasattr(self.mem, 'detach'):
                self.mem = self.mem.detach()
                self.spike = self.spike.detach()
            self.integral(inputs)
            self.calc_spike()
            if self.requires_fp is True:
                self.feature_map.append(self.spike)
            if self.requires_mem is True:
                self.mem_collect.append(self.mem)
            return self.rearrange2op(self.spike)
        elif self.layer_by_layer or self.groups != 1:
            # Unrolled path: loop the steps here over a (step, batch, ...) layout.
            inputs = self.rearrange2node(inputs)
            outputs = []
            for i in range(self.step):
                if self.mem_detach and hasattr(self.mem, 'detach'):
                    self.mem = self.mem.detach()
                    self.spike = self.spike.detach()
                self.integral(inputs[i])
                self.calc_spike()
                if self.requires_fp is True:
                    self.feature_map.append(self.spike)
                if self.requires_mem is True:
                    self.mem_collect.append(self.mem)
                outputs.append(self.spike)
            outputs = torch.stack(outputs)
            outputs = self.rearrange2op(outputs)
            return outputs
        else:
            # Single-step path: the caller drives the time loop.
            if self.mem_detach and hasattr(self.mem, 'detach'):
                self.mem = self.mem.detach()
                self.spike = self.spike.detach()
            self.integral(inputs)
            self.calc_spike()
            if self.requires_fp is True:
                self.feature_map.append(self.spike)
            if self.requires_mem is True:
                self.mem_collect.append(self.mem)
            return self.spike
    def n_reset(self):
        """
        Reset all neuron state; call between two unrelated inputs.

        :return: None
        """
        self.mem = self.v_reset
        self.spike = 0.
        self.feature_map = []
        self.mem_collect = []
    def get_n_attr(self, attr):
        # Fetch a neuron attribute by name, or None if absent.
        if hasattr(self, attr):
            return getattr(self, attr)
        else:
            return None
    def set_n_warm_up(self, flag):
        """
        Some training schedules treat the neuron as an ANN activation during
        the first epochs; this toggles that behaviour.

        :param flag: True: behave as activation function; False: normal spiking
        :return: None
        """
        self.warm_up = flag
    def set_n_threshold(self, thresh):
        """
        Dynamically set the neuron threshold.

        :param thresh: threshold value
        :return:
        """
        self.threshold = Parameter(torch.tensor(thresh, dtype=torch.float), requires_grad=False)
    def set_n_tau(self, tau):
        """
        Dynamically set the decay constant, for leaky neuron variants.

        :param tau: decay constant
        :return:
        """
        if hasattr(self, 'tau'):
            self.tau = Parameter(torch.tensor(tau, dtype=torch.float), requires_grad=False)
        else:
            raise NotImplementedError
#============================================================================
# node的基类
class BaseMCNode(nn.Module, abc.ABC):
    """
    Base class for multi-compartment neuron models.

    :param threshold: membrane-potential threshold for emitting a spike
    :param v_reset: resting potential
    :param comps: compartment names, e.g. ["apical", "basal", "soma"]
    """
    def __init__(self,
                 threshold=1.0,
                 v_reset=0.,
                 comps=[]):
        super().__init__()
        self.threshold = Parameter(torch.tensor(threshold), requires_grad=False)
        # self.decay = Parameter(torch.tensor(decay), requires_grad=False)
        self.v_reset = v_reset
        assert len(comps) != 0
        self.mems = dict()
        for c in comps:
            self.mems[c] = None
        self.spike = None
        self.warm_up = False
    @abc.abstractmethod
    def calc_spike(self):
        pass
    @abc.abstractmethod
    def integral(self, inputs):
        pass
    def forward(self, inputs: dict):
        '''
        Params:
            inputs dict: Inputs for every compartments of neuron
        '''
        if self.warm_up:
            # Warm-up mode: pass inputs through as an ANN activation.
            return inputs
        else:
            self.integral(**inputs)
            self.calc_spike()
            return self.spike
    def n_reset(self):
        """Reset every compartment potential and the spike state."""
        for c in self.mems.keys():
            self.mems[c] = self.v_reset
        self.spike = 0.0
    def get_n_fire_rate(self):
        """Fraction of elements of the last spike tensor at/above threshold."""
        if self.spike is None:
            return 0.
        # numel() replaces np.product(shape): np.product was removed in
        # NumPy 2.0; the result is identical for tensors.
        return float((self.spike.detach() >= self.threshold).sum()) / float(self.spike.numel())
    def set_n_warm_up(self, flag):
        self.warm_up = flag
    def set_n_threshold(self, thresh):
        self.threshold = Parameter(torch.tensor(thresh, dtype=torch.float), requires_grad=False)
class ThreeCompNode(BaseMCNode):
    """
    Three-compartment neuron model (basal dendrite, apical dendrite, soma).

    :param threshold: membrane-potential threshold for emitting a spike
    :param tau: soma membrane time constant
    :param tau_basal: basal-dendrite membrane time constant
    :param tau_apical: apical-dendrite membrane time constant
    :param v_reset: resting potential
    :param comps: compartment names
    :param act_fun: surrogate-gradient function for the spike
    """
    def __init__(self,
                 threshold=1.0,
                 tau=2.0,
                 tau_basal=2.0,
                 tau_apical=2.0,
                 v_reset=0.0,
                 comps=['basal', 'apical', 'soma'],
                 act_fun=AtanGrad):
        # Removed unused locals g_B / g_L (they were never referenced).
        super().__init__(threshold, v_reset, comps)
        self.tau = tau
        self.tau_basal = tau_basal
        self.tau_apical = tau_apical
        self.act_fun = act_fun(alpha=tau, requires_grad=False)
    def integral(self, basal_inputs, apical_inputs):
        '''
        Params:
            basal_inputs torch.Tensor: input current to the basal dendrite
            apical_inputs torch.Tensor: input current to the apical dendrite
        '''
        # Leaky integration in each dendrite, then couple both into the soma.
        self.mems['basal'] = (self.mems['basal'] + basal_inputs) / self.tau_basal
        self.mems['apical'] = (self.mems['apical'] + apical_inputs) / self.tau_apical
        self.mems['soma'] = self.mems['soma'] + (self.mems['apical'] + self.mems['basal'] - self.mems['soma']) / self.tau
    def calc_spike(self):
        self.spike = self.act_fun(self.mems['soma'] - self.threshold)
        # Reset every compartment wherever the soma fired.
        detached = self.spike.detach()
        self.mems['soma'] = self.mems['soma'] * (1. - detached)
        self.mems['basal'] = self.mems['basal'] * (1. - detached)
        self.mems['apical'] = self.mems['apical'] * (1. - detached)
#============================================================================
# 用于静态测试 使用ANN的情况 不累积电位
class ReLUNode(BaseNode):
    """
    ANN test node: a plain ReLU activation with no membrane accumulation,
    used to run the same architecture as a conventional ANN.
    """
    def __init__(self,
                 *args,
                 **kwargs):
        super().__init__(requires_fp=False, *args, **kwargs)
        self.act_fun = nn.ReLU()
    def forward(self, x):
        """
        See :class:`BaseNode`.

        :param x: input tensor
        :return: activated output
        """
        out = self.act_fun(x)
        self.spike = out
        if self.requires_fp is True:
            self.feature_map.append(out)
        if self.requires_mem is True:
            self.mem_collect.append(self.mem)
        return out
    def calc_spike(self):
        # Stateless activation: nothing to compute here.
        pass
class BiasReLUNode(BaseNode):
    """
    ANN test node: ReLU with a constant bias current (+0.1) injected at every
    step, which makes the unit easier to activate.
    """
    def __init__(self,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.act_fun = nn.ReLU()
    def forward(self, x):
        """Apply ReLU to the biased input; optionally record feature maps."""
        self.spike = self.act_fun(x + 0.1)
        if self.requires_fp is True:
            # Fixed: store the tensor as one list entry (was `+=`, which
            # extended the list element-wise), consistent with other nodes.
            self.feature_map.append(self.spike)
        return self.spike
    def calc_spike(self):
        pass
# ============================================================================
# 用于SNN的node
class IFNode(BaseNode):
    """
    Integrate-and-Fire neuron.

    :param threshold: membrane-potential threshold for emitting a spike
    :param act_fun: surrogate-gradient function, default ``AtanGrad``
    :param args: remaining positional args, forwarded to :class:`BaseNode`
    :param kwargs: remaining keyword args, forwarded to :class:`BaseNode`
    """
    def __init__(self, threshold=.5, act_fun=AtanGrad, *args, **kwargs):
        """
        :param threshold: firing threshold
        :param act_fun: surrogate-gradient class or its name as a string
        """
        super().__init__(threshold, *args, **kwargs)
        # NOTE: a string act_fun is resolved with eval(); pass trusted names only.
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.act_fun = act_fun(alpha=2., requires_grad=False)
    def integral(self, inputs):
        # Pure integration: accumulate input current scaled by the time step.
        self.mem = inputs * self.dt + self.mem
    def calc_spike(self):
        thres = self.get_thres()
        self.spike = self.act_fun(self.mem - thres)
        # Soft reset: zero the membrane wherever a spike was emitted.
        self.mem = self.mem * (1 - self.spike.detach())
class LIFNode(BaseNode):
    """
    Leaky Integrate-and-Fire neuron.

    :param threshold: membrane-potential threshold for emitting a spike
    :param tau: membrane time constant controlling the leak
    :param act_fun: surrogate-gradient function, default ``QGateGrad``
    :param args: remaining positional args, forwarded to :class:`BaseNode`
    :param kwargs: remaining keyword args, forwarded to :class:`BaseNode`
    """
    def __init__(self, threshold=0.5, tau=2., act_fun=QGateGrad, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        self.tau = tau
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.act_fun = act_fun(alpha=2., requires_grad=False)
    def integral(self, inputs):
        # Leaky integration: move the membrane towards the input by 1/tau.
        delta = (inputs - self.mem) / self.tau
        self.mem = self.mem + delta
    def calc_spike(self):
        # Uses self.threshold directly (not get_thres()), as in the original.
        self.spike = self.act_fun(self.mem - self.threshold)
        self.mem = self.mem * (1 - self.spike.detach())
class BurstLIFNode(LIFNode):
    """
    LIF neuron that amplifies "burst" spikes: any spike value above 1 is
    scaled by ``burst_factor``.
    """
    def __init__(self, threshold=.5, tau=2., act_fun=RoundGrad, *args, **kwargs):
        super().__init__(threshold=threshold, tau=tau, act_fun=act_fun, *args, **kwargs)
        self.burst_factor = 1.5
    def calc_spike(self):
        super().calc_spike()
        # Amplify spikes above 1 by the burst factor; leave the rest untouched.
        burst_mask = self.spike > 1.
        self.spike = torch.where(burst_mask, self.spike * self.burst_factor, self.spike)
class BackEINode(BaseNode):
    """
    Node with a self-feedback connection and excitatory/inhibitory gating.
    Reference: https://www.sciencedirect.com/science/article/pii/S0893608022002520

    :param threshold: membrane-potential threshold for emitting a spike
    :param decay: multiplicative membrane decay applied each step
    :param channel: channel count of the feedback / EI convolutions
    :param if_back: whether to use the self-feedback convolution
    :param if_ei: whether to use the excitatory/inhibitory gate
    :param cfg_backei: half kernel size of the feedback/EI convolutions
    :param args: remaining positional args, forwarded to :class:`BaseNode`
    :param kwargs: remaining keyword args, forwarded to :class:`BaseNode`
    """
    def __init__(self, threshold=0.5, decay=0.2, act_fun=BackEIGateGrad, th_fun=EIGrad, channel=40, if_back=True,
                 if_ei=True, cfg_backei=2, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        self.decay = decay
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        if isinstance(th_fun, str):
            th_fun = eval(th_fun)
        self.act_fun = act_fun()
        self.th_fun = th_fun()
        self.channel = channel
        self.if_back = if_back
        if self.if_back:
            self.back = nn.Conv2d(channel, channel, kernel_size=2 * cfg_backei+1, stride=1, padding=cfg_backei)
        self.if_ei = if_ei
        if self.if_ei:
            self.ei = nn.Conv2d(channel, channel, kernel_size=2 * cfg_backei+1, stride=1, padding=cfg_backei)
    def integral(self, inputs):
        # Lazily initialise state from the first input's shape.
        if self.mem is None:
            self.mem = torch.zeros_like(inputs)
            self.spike = torch.zeros_like(inputs)
        self.mem = self.decay * self.mem
        if self.if_back:
            # Gate the input by the sigmoid-ed feedback of the last spikes.
            # torch.sigmoid replaces the deprecated F.sigmoid.
            self.mem += torch.sigmoid(self.back(self.spike)) * inputs
        else:
            self.mem += inputs
    def calc_spike(self):
        if self.if_ei:
            # EI gate modulates the spike after the membrane reset.
            ei_gate = self.th_fun(self.ei(self.mem))
            self.spike = self.act_fun(self.mem-self.threshold)
            self.mem = self.mem * (1 - self.spike)
            self.spike = ei_gate * self.spike
        else:
            self.spike = self.act_fun(self.mem-self.threshold)
            self.mem = self.mem * (1 - self.spike)
    def n_reset(self):
        # State shape depends on the input, so reset to None rather than 0.
        self.mem = None
        self.spike = None
        self.feature_map = []
        self.mem_collect = []
class NoiseLIFNode(LIFNode):
    """
    Noisy Leaky Integrate-and-Fire neuron.

    Injects Beta-distributed noise into the membrane; the default noise
    distribution is ``Beta(log(2), log(6))`` scaled by the threshold.

    :param threshold: membrane-potential threshold for emitting a spike
    :param tau: membrane time constant controlling the leak
    :param act_fun: surrogate-gradient function, default ``GateGrad``
    :param log_alpha: controls parameter ``a`` of the Beta distribution
    :param log_beta: controls parameter ``b`` of the Beta distribution
    :param args: remaining positional args, forwarded to :class:`LIFNode`
    :param kwargs: remaining keyword args, forwarded to :class:`LIFNode`
    """
    def __init__(self,
                 threshold=1,
                 tau=2.,
                 act_fun=GateGrad,
                 log_alpha=np.log(2),
                 log_beta=np.log(6),
                 *args,
                 **kwargs):
        super().__init__(threshold=threshold, tau=tau, act_fun=act_fun, *args, **kwargs)
        # Learnable log-parameters keep alpha/beta positive through exp().
        self.log_alpha = Parameter(torch.as_tensor(log_alpha), requires_grad=True)
        self.log_beta = Parameter(torch.as_tensor(log_beta), requires_grad=True)
    def integral(self, inputs):  # b, c, w, h / b, c
        alpha, beta = torch.exp(self.log_alpha), torch.exp(self.log_beta)
        mu = alpha / (alpha + beta)
        var = ((alpha + 1) * alpha) / ((alpha + beta + 1) * (alpha + beta))
        # Beta sample itself is detached; re-attaching mu/var keeps the
        # gradient path to log_alpha / log_beta (reparameterisation trick).
        noise = torch.distributions.beta.Beta(alpha, beta).sample(inputs.shape) * self.get_thres()
        noise = noise * var / var.detach() + mu - mu.detach()
        self.mem = self.mem + ((inputs - self.mem) / self.tau + noise) * self.dt
class BiasLIFNode(BaseNode):
    """
    LIF neuron with a constant bias current (+0.1) injected every step; used
    for testing networks with inhibitory / feedback connections.

    :param threshold: membrane-potential threshold for emitting a spike
    :param tau: membrane time constant controlling the leak
    :param act_fun: surrogate-gradient function, default ``AtanGrad``
    :param args: remaining positional args, forwarded to :class:`BaseNode`
    :param kwargs: remaining keyword args, forwarded to :class:`BaseNode`
    """
    def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        self.tau = tau
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.act_fun = act_fun(alpha=2., requires_grad=False)
    def integral(self, inputs):
        # Leaky integration plus the constant bias current.
        leak = (inputs - self.mem) / self.tau
        self.mem = self.mem + leak * self.dt + 0.1
    def calc_spike(self):
        self.spike = self.act_fun(self.mem - self.get_thres())
        self.mem = self.mem * (1 - self.spike.detach())
class LIFSTDPNode(BaseNode):
    """
    LIF node used for STDP training: the membrane decays multiplicatively by
    ``tau`` and the input current is added directly.
    """
    def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        self.tau = tau
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.act_fun = act_fun(alpha=2., requires_grad=False)
    def integral(self, inputs):
        # Multiplicative decay followed by direct current injection.
        self.mem = inputs + self.mem * self.tau
    def calc_spike(self):
        self.spike = self.act_fun(self.mem - self.threshold)
        self.mem = self.mem * (1 - self.spike.detach())
    def requires_activation(self):
        # STDP training needs no extra activation stage.
        return False
class PLIFNode(BaseNode):
    """
    Parametric LIF: the leak factor ``sigmoid(w)`` (playing the role of
    1/tau) is a learnable parameter updated by backprop.
    Reference: https://arxiv.org/abs/2007.05785

    :param threshold: membrane-potential threshold for emitting a spike
    :param tau: initial membrane time constant
    :param act_fun: surrogate-gradient function, default ``AtanGrad``
    :param args: remaining positional args, forwarded to :class:`BaseNode`
    :param kwargs: remaining keyword args, forwarded to :class:`BaseNode`
    """
    def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        # Choose w so that sigmoid(w) == 1/tau at initialisation.
        init_w = -math.log(tau - 1.)
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.act_fun = act_fun(alpha=2., requires_grad=True)
        self.w = nn.Parameter(torch.as_tensor(init_w))
    def integral(self, inputs):
        self.mem = self.mem + (inputs - self.mem) * self.w.sigmoid() * self.dt
    def calc_spike(self):
        self.spike = self.act_fun(self.mem - self.get_thres())
        self.mem = self.mem * (1 - self.spike.detach())
class PSU(BaseNode):
    """
    Parallel Spiking Unit: processes all ``step`` time steps at once using
    precomputed transition matrices.

    NOTE(review): assumes ``generate_matrix(T, tau)`` (defined elsewhere in
    the project) returns the pair of (T, T) mixing matrices used below.
    """
    def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        # Removed unused local `init_w` (leftover from the PLIF variants).
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.parallel = True
        self.act_fun = act_fun(alpha=2., requires_grad=True)
        T = self.step
        m1, m2 = generate_matrix(T, tau)
        self.register_buffer('m1', m1)
        self.register_buffer('m2', m2)
        self.m2 *= self.threshold
    def integral(self, inputs):
        # inputs: (T, ...) flattened to (T, -1) for the matrix products.
        d1 = self.m1 @ inputs.flatten(1)
        self.mem = (d1 + self.m2 @ d1.sigmoid()).view(inputs.shape)
    def calc_spike(self):
        self.spike = self.act_fun(self.mem - self.threshold)
class IPSU(BaseNode):
    """
    Input-coupled Parallel Spiking Unit: like :class:`PSU`, but the first
    branch is a learnable, lower-triangular (causal) linear map over the
    time dimension.
    """
    def masked_weight(self):
        # Lower-triangular mask keeps the temporal mixing causal.
        return self.fc.weight * self.mask0
    def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        init_w = -math.log(tau - 1.)  # NOTE(review): unused, kept as-is
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.parallel = True
        self.act_fun = act_fun(alpha=2., requires_grad=True)
        T = self.step
        matrix, matrix2 = generate_matrix(T, tau)
        self.register_buffer('m1', matrix)
        self.register_buffer('m2', matrix2)
        # self.m2 *= self.threshold
        self.fc = nn.Linear(T, T)
        nn.init.constant_(self.fc.bias, 0.)
        nn.init.kaiming_normal_(self.fc.weight, mode='fan_out', nonlinearity='relu')
        mask0 = torch.tril(torch.ones([T, T]))
        self.register_buffer('mask0', mask0)
    def integral(self, inputs):
        # addmm: bias + masked_weight @ flattened inputs, over time.
        d1 = torch.addmm(self.fc.bias.unsqueeze(1), self.masked_weight(), inputs.flatten((1)))
        self.mem = (d1 + self.m2 @ inputs.flatten(1)).view(inputs.shape)
    def calc_spike(self):
        self.spike = self.act_fun(self.mem - self.threshold)
class RPSU(BaseNode):
    """
    Recurrent-style Parallel Spiking Unit: like :class:`PSU`, but the second
    branch passes a learnable, causal temporal mixing through a sigmoid.
    """
    def masked_weight(self):
        # Lower-triangular mask keeps the temporal mixing causal.
        return self.fc.weight * self.mask0
    def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        init_w = -math.log(tau - 1.)  # NOTE(review): unused, kept as-is
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.parallel = True
        self.act_fun = act_fun(alpha=2., requires_grad=True)
        T = self.step
        matrix, matrix2 = generate_matrix(T, tau)
        self.register_buffer('m1', matrix)
        self.register_buffer('m2', matrix2)
        # self.m2 *= self.threshold
        self.fc = nn.Linear(T, T)
        nn.init.constant_(self.fc.bias, 0.)
        nn.init.kaiming_normal_(self.fc.weight, mode='fan_out', nonlinearity='relu')
        mask0 = torch.tril(torch.ones([T, T]))
        self.register_buffer('mask0', mask0)
    def integral(self, inputs):
        d1 = self.m1 @ inputs.flatten(1)
        # Learnable causal mixing, squashed through a sigmoid gate.
        d2 = torch.addmm(self.fc.bias.unsqueeze(1), self.masked_weight(), inputs.flatten((1)))
        self.mem = (d1 + self.m2 @ d2.sigmoid()).view(inputs.shape)
    def calc_spike(self):
        self.spike = self.act_fun(self.mem - self.threshold)
class SPSN(BaseNode):
    """
    Stochastic Parallel Spiking Neuron: the membrane is a sigmoid of the
    matrix-mixed inputs, interpreted as a firing probability, and spikes are
    drawn with ``torch.bernoulli``.
    """
    def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        # Removed unused local `init_w`; the second matrix returned by
        # generate_matrix is not used by this node.
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.parallel = True
        self.act_fun = act_fun(alpha=2., requires_grad=True)
        m1, _ = generate_matrix(self.step, tau)
        self.register_buffer('m1', m1)
    def integral(self, inputs):
        self.mem = (self.m1 @ inputs.flatten(1)).sigmoid().view(inputs.shape)
    def calc_spike(self):
        # Stochastic firing: the membrane value is the spike probability.
        self.spike = torch.bernoulli(self.mem)
class NoisePLIFNode(PLIFNode):
    """
    Noisy Parametric LIF: PLIF with Beta-distributed noise injected into the
    membrane, reparameterised so log_alpha/log_beta stay trainable.

    :param threshold: membrane-potential threshold for emitting a spike
    :param tau: initial membrane time constant
    :param act_fun: surrogate-gradient function, default ``GateGrad``
    :param kwargs: may carry ``log_alpha`` / ``log_beta`` Beta parameters
        (defaults: log 2 and log 6)
    """
    def __init__(self,
                 threshold=1,
                 tau=2.,
                 act_fun=GateGrad,
                 *args,
                 **kwargs):
        super().__init__(threshold=threshold, tau=tau, act_fun=act_fun, *args, **kwargs)
        log_alpha = kwargs.get('log_alpha', np.log(2))
        log_beta = kwargs.get('log_beta', np.log(6))
        self.log_alpha = Parameter(torch.as_tensor(log_alpha), requires_grad=True)
        self.log_beta = Parameter(torch.as_tensor(log_beta), requires_grad=True)
    def integral(self, inputs):  # b, c, w, h / b, c
        alpha, beta = torch.exp(self.log_alpha), torch.exp(self.log_beta)
        mu = alpha / (alpha + beta)
        var = ((alpha + 1) * alpha) / ((alpha + beta + 1) * (alpha + beta))
        # Beta sample is detached; mu/var re-attach the gradient path.
        noise = torch.distributions.beta.Beta(alpha, beta).sample(inputs.shape) * self.get_thres()
        noise = noise * var / var.detach() + mu - mu.detach()
        self.mem = self.mem + ((inputs - self.mem) * self.w.sigmoid() + noise) * self.dt
class BiasPLIFNode(BaseNode):
    """
    Parametric LIF with a constant bias current (+0.1) injected every step.

    :param threshold: membrane-potential threshold for emitting a spike
    :param tau: initial membrane time constant
    :param act_fun: surrogate-gradient function, default ``AtanGrad``
    :param args: remaining positional args, forwarded to :class:`BaseNode`
    :param kwargs: remaining keyword args, forwarded to :class:`BaseNode`
    """
    def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        # Choose w so that sigmoid(w) == 1/tau at initialisation.
        init_w = -math.log(tau - 1.)
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.act_fun = act_fun(alpha=2., requires_grad=True)
        self.w = nn.Parameter(torch.as_tensor(init_w))
    def integral(self, inputs):
        gate = self.w.sigmoid()
        self.mem = self.mem + ((inputs - self.mem) * gate + 0.1) * self.dt
    def calc_spike(self):
        self.spike = self.act_fun(self.mem - self.get_thres())
        self.mem = self.mem * (1 - self.spike.detach())
class DoubleSidePLIFNode(LIFNode):
    """
    PLIF node that emits both positive and negative spikes: positive when the
    membrane exceeds the threshold, negative when it drops below the mirrored
    threshold.

    :param threshold: membrane-potential threshold for emitting a spike
    :param tau: membrane time constant controlling the leak
    :param act_fun: surrogate-gradient function, default ``AtanGrad``
    :param args: remaining positional args, forwarded to :class:`LIFNode`
    :param kwargs: remaining keyword args, forwarded to :class:`LIFNode`
    """
    def __init__(self,
                 threshold=.5,
                 tau=2.,
                 act_fun=AtanGrad,
                 *args,
                 **kwargs):
        super().__init__(threshold, tau, act_fun, *args, **kwargs)
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.act_fun = act_fun(alpha=2., requires_grad=True)
    def calc_spike(self):
        # Fixed: the second term called `self.get_thres` without parentheses,
        # passing the bound method object instead of the threshold value.
        self.spike = self.act_fun(self.mem - self.get_thres()) - self.act_fun(self.get_thres() - self.mem)
        self.mem = self.mem * (1. - torch.abs(self.spike.detach()))
class IzhNode(BaseNode):
    """
    Izhikevich spiking neuron.

    Continuous dynamics (forward-Euler discretised in ``integral``):
        v' = 0.04 v^2 + 5 v + 140 - u + I
        u' = a (b v - u)
    with the discrete reset: if v >= threshold then v <- c, u <- u + d.

    :param threshold: membrane-potential threshold for emitting a spike
    :param tau: time constant (stored but not used by the dynamics below)
    :param act_fun: surrogate-gradient function, default ``AtanGrad``
    :param args: extra positional args
    :param kwargs: may carry the model constants a, b, c, d and dt
    """
    def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        self.tau = tau
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.act_fun = act_fun(alpha=2., requires_grad=False)
        # Izhikevich model constants (kwargs-overridable).
        self.a = kwargs['a'] if 'a' in kwargs else 0.02
        self.b = kwargs['b'] if 'b' in kwargs else 0.2
        self.c = kwargs['c'] if 'c' in kwargs else -55.
        self.d = kwargs['d'] if 'd' in kwargs else -2.
        '''
        v' = 0.04v^2 + 5v + 140 -u + I
        u' = a(bv-u)
        下面是将Izh离散化的写法
        if v>= thresh:
            v = c
            u = u + d
        '''
        # Initialise the membrane potential v (self.mem) and recovery variable u.
        self.mem = 0.
        self.u = 0.
        self.dt = kwargs['dt'] if 'dt' in kwargs else 1.
    def integral(self, inputs):
        # One forward-Euler step of the Izhikevich ODEs.
        self.mem = self.mem + self.dt * (0.04 * self.mem * self.mem + 5 * self.mem - self.u + 140 + inputs)
        self.u = self.u + self.dt * (self.a * self.b * self.mem - self.a * self.u)
    def calc_spike(self):
        self.spike = self.act_fun(self.mem - self.get_thres())  # spike where mem exceeds threshold
        # Reset: v <- c where a spike fired; u <- u + d.
        self.mem = self.mem * (1 - self.spike.detach()) + self.spike.detach() * self.c
        self.u = self.u + self.spike.detach() * self.d
    def n_reset(self):
        # Reset both state variables and the spike output.
        self.mem = 0.
        self.u = 0.
        self.spike = 0.
class IzhNodeMU(BaseNode):
    """
    Izhikevich spiking neuron, multi-parameter version: a, b, c, d and the
    initial mem / u / dt can all be supplied through ``kwargs``.

    :param threshold: membrane threshold for spike emission
    :param v_reset: resting potential
    :param dt: time step
    :param step: number of simulation steps
    :param tau: membrane time constant (stored but not used by the dynamics)
    :param act_fun: surrogate gradient function, default ``surrogate.AtanGrad``
    :param args: additional positional arguments
    :param kwargs: additional keyword arguments (a, b, c, d, mem, u, dt)
    """
    def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        self.tau = tau
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.act_fun = act_fun(alpha=2., requires_grad=False)
        self.a = kwargs['a'] if 'a' in kwargs else 0.02
        self.b = kwargs['b'] if 'b' in kwargs else 0.2
        self.c = kwargs['c'] if 'c' in kwargs else -55.
        self.d = kwargs['d'] if 'd' in kwargs else -2.
        self.mem = kwargs['mem'] if 'mem' in kwargs else 0.
        self.u = kwargs['u'] if 'u' in kwargs else 0.
        self.dt = kwargs['dt'] if 'dt' in kwargs else 1.

    def integral(self, inputs):
        # Discretized Izhikevich dynamics.
        self.mem = self.mem + self.dt * (0.04 * self.mem * self.mem + 5 * self.mem - self.u + 140 + inputs)
        self.u = self.u + self.dt * (self.a * self.b * self.mem - self.a * self.u)

    def calc_spike(self):
        self.spike = self.act_fun(self.mem - self.threshold)
        # Reset: v -> c where a spike fired, u -> u + d.
        self.mem = self.mem * (1 - self.spike.detach()) + self.spike.detach() * self.c
        self.u = self.u + self.spike.detach() * self.d

    def n_reset(self):
        # NOTE(review): resets mem to -70. while the constructor default is
        # 0. — confirm the asymmetry is intended.
        self.mem = -70.
        self.u = 0.
        self.spike = 0.

    def requires_activation(self):
        # This node emits spikes itself; no external activation is needed.
        return False
class DGLIFNode(BaseNode):
    """
    Reference: https://arxiv.org/abs/2110.08858

    :param threshold: spike firing threshold
    :param tau: membrane time constant controlling decay
    """
    def __init__(self, threshold=.5, tau=2., *args, **kwargs):
        # NOTE(review): ``tau`` is forwarded positionally to BaseNode, so it
        # lands on BaseNode's second positional parameter — confirm intended.
        super().__init__(threshold, tau, *args, **kwargs)
        self.act = nn.ReLU()
        self.tau = tau

    def integral(self, inputs):
        # Rectify inputs, then leaky-integrate towards them.
        inputs = self.act(inputs)
        self.mem = self.mem + ((inputs - self.mem) / self.tau) * self.dt

    def calc_spike(self):
        spike = self.mem.clone()
        spike[(spike < self.get_thres())] = 0.
        # self.spike = spike / (self.mem.detach().clone() + 1e-12)
        # Straight-through estimator: binarized forward, gradient via spike.
        self.spike = spike - spike.detach() + \
            torch.where(spike.detach() > self.get_thres(), torch.ones_like(spike), torch.zeros_like(spike))
        # NOTE(review): this overwrites the straight-through result above, so
        # the forwarded spike is the thresholded membrane value itself.
        self.spike = spike
        # Hard reset wherever the membrane reached the threshold.
        self.mem = torch.where(self.mem >= self.get_thres(), torch.zeros_like(self.mem), self.mem)
class HTDGLIFNode(IFNode):
    """
    Reference: https://arxiv.org/abs/2110.08858

    :param threshold: spike firing threshold
    :param tau: membrane time constant controlling decay
    """
    def __init__(self, threshold=.5, tau=2., *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        # While warm_up is True, forward is a plain ReLU (no spiking).
        self.warm_up = False

    def calc_spike(self):
        spike = self.mem.clone()
        spike[(spike < self.get_thres())] = 0.
        # self.spike = spike / (self.mem.detach().clone() + 1e-12)
        # Straight-through estimator: binarized forward, gradient via spike.
        self.spike = spike - spike.detach() + \
            torch.where(spike.detach() > self.get_thres(), torch.ones_like(spike), torch.zeros_like(spike))
        # NOTE(review): this overwrites the straight-through result above, so
        # the forwarded spike is the thresholded membrane value itself.
        self.spike = spike
        self.mem = torch.where(self.mem >= self.get_thres(), torch.zeros_like(self.mem), self.mem)
        # self.mem[[(spike > self.get_thres())]] = self.mem[[(spike > self.get_thres())]] - self.get_thres()
        # Leak a small gradient path (0.2 * spike) into the membrane update.
        self.mem = (self.mem + 0.2 * self.spike - 0.2 * self.spike.detach()) * self.dt

    def forward(self, inputs):
        if self.warm_up:
            return F.relu(inputs)
        else:
            # Skip IFNode.forward and call BaseNode.forward on rectified input.
            return super(IFNode, self).forward(F.relu(inputs))
class SimHHNode(BaseNode):
    """
    Simplified Hodgkin-Huxley neuron.

    :param threshold: membrane threshold for spike emission (default 50)
    :param tau: membrane time constant (stored but unused here)
    :param act_fun: surrogate gradient function, default ``surrogate.AtanGrad``
    :param args: additional positional arguments
    :param kwargs: additional keyword arguments
    """
    def __init__(self, threshold=50., tau=2., act_fun=AtanGrad, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        self.tau = tau
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        '''
        I = Cm dV/dt + g_k*n^4*(V_m-V_k) + g_Na*m^3*h*(V_m-V_Na) + g_l*(V_m - V_L)
        '''
        self.act_fun = act_fun(alpha=2., requires_grad=False)
        # NOTE(review): g_K is 120 although the trailing comment suggests 36,
        # and V_K is -120 vs the canonical -12 — confirm intended values.
        self.g_Na, self.g_K, self.g_l = torch.tensor(120.), torch.tensor(120), torch.tensor(0.3)  # k 36
        self.V_Na, self.V_K, self.V_l = torch.tensor(120.), torch.tensor(-120.), torch.tensor(10.6)  # k -12
        # Gating variables (activation m, n; inactivation h).
        self.m, self.n, self.h = torch.tensor(0), torch.tensor(0), torch.tensor(0)
        self.mem = 0
        self.dt = 0.01

    def integral(self, inputs):
        # Channel currents at the current membrane potential.
        self.I_Na = torch.pow(self.m, 3) * self.g_Na * self.h * (self.mem - self.V_Na)
        self.I_K = torch.pow(self.n, 4) * self.g_K * (self.mem - self.V_K)
        self.I_L = self.g_l * (self.mem - self.V_l)
        # Membrane update with capacitance 0.02.
        self.mem = self.mem + self.dt * (inputs - self.I_Na - self.I_K - self.I_L) / 0.02
        # non Na
        # self.mem = self.mem + 0.01 * (inputs - self.I_K - self.I_L) / 0.02 #decayed
        # NON k
        # self.mem = self.mem + 0.01 * (inputs - self.I_Na - self.I_L) / 0.02 #increase
        # Voltage-dependent gating rate constants.
        self.alpha_n = 0.01 * (self.mem + 10.0) / (1 - torch.exp(-(self.mem + 10.0) / 10))
        self.beta_n = 0.125 * torch.exp(-(self.mem) / 80)
        self.alpha_m = 0.1 * (self.mem + 25) / (1 - torch.exp(-(self.mem + 25) / 10))
        self.beta_m = 4 * torch.exp(-(self.mem) / 18)
        self.alpha_h = 0.07 * torch.exp(-(self.mem) / 20)
        self.beta_h = 1 / (1 + torch.exp(-(self.mem + 30) / 10))
        # Euler step for the gating variables.
        self.n = self.n + self.dt * (self.alpha_n * (1 - self.n) - self.beta_n * self.n)
        self.m = self.m + self.dt * (self.alpha_m * (1 - self.m) - self.beta_m * self.m)
        self.h = self.h + self.dt * (self.alpha_h * (1 - self.h) - self.beta_h * self.h)

    def calc_spike(self):
        self.spike = self.act_fun(self.mem - self.threshold)
        # Hard reset where a spike fired.
        self.mem = self.mem * (1 - self.spike.detach())

    def forward(self, inputs):
        self.integral(inputs)
        self.calc_spike()
        return self.spike

    def n_reset(self):
        # Clear membrane, spike and gating state between sequences.
        self.mem = 0.
        self.spike = 0.
        self.m, self.n, self.h = torch.tensor(0), torch.tensor(0), torch.tensor(0)

    def requires_activation(self):
        return False
class CTIzhNode(IzhNode):
    """
    Izhikevich neuron with cortical-column bookkeeping for brain simulation.

    Carries metadata (name, layer/column location, excitability, spike
    pattern), dendrite/adjacency structures and the quadratic-Izhikevich
    parameters (Vr, Vt, Vpeak, k, capacitance). When it fires, it injects a
    random DC drive into every post-synaptic neuron, signed according to this
    neuron's excitability.
    """

    def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kwargs):
        super().__init__(threshold, tau, act_fun, *args, **kwargs)
        # Metadata / topology (idiom: dict.get instead of repeated `in` tests).
        self.name = kwargs.get('name', '')
        self.excitability = kwargs.get('excitability', 'TRUE')
        self.spikepattern = kwargs.get('spikepattern', 'RS')
        self.synnum = kwargs.get('synnum', 0)
        self.locationlayer = kwargs.get('locationlayer', '')
        self.adjneuronlist = {}
        self.proximal_dendrites = []
        self.distal_dendrites = []
        self.totalindex = kwargs.get('totalindex', 0)
        self.colindex = 0
        self.state = 'inactive'
        # Quadratic Izhikevich parameters.
        self.Gup = kwargs.get('Gup', 0.0)
        self.Gdown = kwargs.get('Gdown', 0.0)
        self.Vr = kwargs.get('Vr', 0.0)
        self.Vt = kwargs.get('Vt', 0.0)
        self.Vpeak = kwargs.get('Vpeak', 0.0)
        # NOTE: the attribute keeps the original spelling ('capicitance')
        # because integral() and external callers read it; the kwarg key is
        # the correctly-spelled 'capacitance'.
        self.capicitance = kwargs.get('capacitance', 0.0)
        self.k = kwargs.get('k', 0.0)
        self.mem = -65
        self.vtmp = -65
        self.u = -13.0
        self.spike = 0
        self.dc = 0

    def integral(self, inputs):
        # Quadratic Izhikevich membrane and recovery update.
        self.mem += self.dt * (
            self.k * (self.mem - self.Vr) * (self.mem - self.Vt) - self.u + inputs) / self.capicitance
        self.u += self.dt * (self.a * (self.b * (self.mem - self.Vr) - self.u))

    def calc_spike(self):
        # Hard reset when the peak is reached; then notify neighbours.
        if self.mem >= self.Vpeak:
            self.mem = self.c
            self.u = self.u + self.d
            self.spike = 1
            self.spreadMarkPostNeurons()

    def spreadMarkPostNeurons(self):
        # BUGFIX: iterate the dict keys directly — the original bound the dict
        # values to a local named ``list``, shadowing the builtin.
        for post in self.adjneuronlist:
            if self.excitability == "TRUE":
                post.dc = random.randint(140, 160)
            else:
                post.dc = random.randint(-160, -140)
class adth(BaseNode):
    """
    The adaptive Exponential Integrate-and-Fire model (aEIF).

    :param args: Other parameters
    :param kwargs: Other parameters
    """
    def __init__(self, *args, **kwargs):
        super().__init__(requires_fp=False, *args, **kwargs)

    def adthNode(self, v, dt, c_m, g_m, alpha_w, ad, Ieff, Ichem, Igap, tau_ad, beta_ad, vt, vm1):
        """
        Advance the adaptive-threshold state one step and detect spikes.

        :param v: current neuron voltage (NumPy array — ``.astype`` is used,
            so torch tensors are not accepted here)
        :param dt: time step
        :param c_m: membrane capacitance
        :param g_m: leak conductance
        :param alpha_w: adaptation coupling strength
        :param ad: adaptive variable
        :param Ieff: effective input current
        :param Ichem: chemical-synapse current
        :param Igap: gap-junction current
        :param tau_ad: adaptation time constant
        :param beta_ad: adaptation drive coefficient
        :param vt: spike threshold
        :param vm1: voltage from the previous step
        :return: (v, ad, vv, vm1) where vv is 1 where the voltage crossed the
            threshold from below
        """
        v = v + dt / c_m * (-g_m * v + alpha_w * ad + Ieff + Ichem + Igap)
        ad = ad + dt / tau_ad * (-ad + beta_ad * v)
        # Spike only on an upward threshold crossing.
        vv = (v >= vt).astype(int) * (vm1 < vt).astype(int)
        vm1 = v
        return v, ad, vv, vm1

    def calc_spike(self):
        # Spiking is handled inside adthNode; nothing to do here.
        pass
class HHNode(BaseNode):
    """
    Hodgkin-Huxley neuron group for brain simulation.

    p: [threshold, g_Na, g_K, g_l, V_Na, V_K, V_l, C]
        per-neuron parameter arrays: threshold, channel conductances,
        reversal potentials and membrane capacitance.
    :param dt: integration time step
    :param device: torch device the state tensors live on
    :param act_fun: surrogate gradient function, default ``surrogate.AtanGrad``
    """
    def __init__(self, p, dt, device, act_fun=AtanGrad, *args, **kwargs):
        super().__init__(threshold=p[0], *args, **kwargs)
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        '''
        I = Cm dV/dt + g_k*n^4*(V_m-V_k) + g_Na*m^3*h*(V_m-V_Na) + g_l*(V_m - V_L)
        '''
        self.neuron_num = len(p[0])
        self.act_fun = act_fun(alpha=2., requires_grad=False)
        self.tau_I = 3  # time constant filtering the synaptic input
        self.g_Na = torch.tensor(p[1])
        self.g_K = torch.tensor(p[2])
        self.g_l = torch.tensor(p[3])
        self.V_Na = torch.tensor(p[4])
        self.V_K = torch.tensor(p[5])
        self.V_l = torch.tensor(p[6])
        self.C = torch.tensor(p[7])
        # Gating variables initialized near their resting-state values.
        self.m = 0.05 * torch.ones(self.neuron_num, device=device, requires_grad=False)
        self.n = 0.31 * torch.ones(self.neuron_num, device=device, requires_grad=False)
        self.h = 0.59 * torch.ones(self.neuron_num, device=device, requires_grad=False)
        self.v_reset = 0
        self.dt = dt
        self.dt_over_tau = self.dt / self.tau_I
        # Scaling used for the stochastic background input (mu, sig below).
        self.sqrt_coeff = math.sqrt(1 / (2 * (1 / self.dt_over_tau)))
        self.mu = 10
        self.sig = 12
        self.mem = torch.tensor(self.v_reset, device=device, requires_grad=False)
        # mem_p keeps the previous-step membrane for crossing detection.
        self.mem_p = self.mem
        self.spike = torch.zeros(self.neuron_num, device=device, requires_grad=False)
        self.Iback = torch.zeros(self.neuron_num, device=device, requires_grad=False)
        self.Ieff = torch.zeros(self.neuron_num, device=device, requires_grad=False)

    def integral(self, inputs):
        # Voltage-dependent rate constants for the gating variables.
        self.alpha_n = (0.1 - 0.01 * self.mem) / (torch.exp(1 - 0.1 * self.mem) - 1)
        self.alpha_m = (2.5 - 0.1 * self.mem) / (torch.exp(2.5 - 0.1 * self.mem) - 1)
        self.alpha_h = 0.07 * torch.exp(-self.mem / 20.0)
        self.beta_n = 0.125 * torch.exp(-self.mem / 80.0)
        self.beta_m = 4.0 * torch.exp(-self.mem / 18.0)
        self.beta_h = 1 / (torch.exp(3 - 0.1 * self.mem) + 1)
        # Euler step for the gating variables.
        self.n = self.n + self.dt * (self.alpha_n * (1 - self.n) - self.beta_n * self.n)
        self.m = self.m + self.dt * (self.alpha_m * (1 - self.m) - self.beta_m * self.m)
        self.h = self.h + self.dt * (self.alpha_h * (1 - self.h) - self.beta_h * self.h)
        # Channel currents, then the membrane update.
        self.I_Na = torch.pow(self.m, 3) * self.g_Na * self.h * (self.mem - self.V_Na)
        self.I_K = torch.pow(self.n, 4) * self.g_K * (self.mem - self.V_K)
        self.I_L = self.g_l * (self.mem - self.V_l)
        self.mem_p = self.mem
        self.mem = self.mem + self.dt * (inputs - self.I_Na - self.I_K - self.I_L) / self.C

    def calc_spike(self):
        # Spike on an upward threshold crossing (previous below, current above).
        self.spike = (self.threshold > self.mem_p).float() * (self.mem > self.threshold).float()

    def forward(self, inputs):
        self.integral(inputs)
        self.calc_spike()
        return self.spike, self.mem

    def requires_activation(self):
        return False
class aEIF(BaseNode):
    """
    The adaptive Exponential Integrate-and-Fire model (aEIF).

    This class defines the membrane, spike, current and parameters of a
    neuron group of a specific type.

    p: [threshold, v_reset, c_m, tao_w, alpha_ad, beta_ad] (per-neuron arrays)
    :param dt: integration time step
    :param device: torch device the state tensors live on
    :param args: Other parameters
    :param kwargs: Other parameters
    """
    def __init__(self, p, dt, device, *args, **kwargs):
        """
        p:[threshold, v_reset, c_m, tao_w, alpha_ad, beta_ad]
        """
        super().__init__(threshold=p[0], requires_fp=False, *args, **kwargs)
        self.neuron_num = len(p[0])
        self.g_m = 0.1  # neuron conduction
        self.dt = dt
        self.tau_I = 3  # Time constant to filter the synaptic inputs
        self.Delta_T = 0.5  # sharpness of the exponential spike initiation
        self.v_reset = p[1]  # membrane potential reset to v_reset after fire spike
        self.c_m = p[2]
        self.tau_w = p[3]  # Time constant of adaption coupling
        self.alpha_ad = p[4]
        self.beta_ad = p[5]
        self.refrac = 5 / self.dt  # refractory period (in steps)
        self.dt_over_tau = self.dt / self.tau_I
        self.sqrt_coeff = math.sqrt(1 / (2 * (1 / self.dt_over_tau)))
        self.mem = self.v_reset
        self.spike = torch.zeros(self.neuron_num, device=device, requires_grad=False)
        self.ad = torch.zeros(self.neuron_num, device=device, requires_grad=False)
        # Random initial refractory counters desynchronize the population.
        self.ref = torch.randint(0, int(self.refrac + 1), (1, self.neuron_num), device=device, requires_grad=False).squeeze(
            0)  # refractory counter
        self.ref = self.ref.float()
        self.mu = 10
        self.sig = 12
        self.Iback = torch.zeros(self.neuron_num, device=device, requires_grad=False)
        self.Ieff = torch.zeros(self.neuron_num, device=device, requires_grad=False)

    def integral(self, inputs):
        # Integrate only outside the refractory window (ref > refrac).
        self.mem = self.mem + (self.ref > self.refrac) * self.dt / self.c_m * \
            (-self.g_m * (self.mem - self.v_reset) + self.g_m * self.Delta_T *
             torch.exp((self.mem - self.threshold) / self.Delta_T) +
             self.alpha_ad * self.ad + inputs)
        self.ad = self.ad + (self.ref > self.refrac) * self.dt / self.tau_w * \
            (-self.ad + self.beta_ad * (self.mem - self.v_reset))

    def calc_spike(self):
        self.spike = (self.mem > self.threshold).float()
        # Restart the refractory counter for spiking neurons; advance others.
        self.ref = self.ref * (1 - self.spike) + 1
        # Spike-triggered adaptation jump, then hard reset to v_reset.
        self.ad = self.ad + self.spike * 30
        self.mem = self.spike * self.v_reset + (1 - self.spike.detach()) * self.mem

    def forward(self, inputs):
        # aeifnode_cuda.forward(self.threshold, self.c_m, self.alpha_w, self.beta_ad, inputs, self.ref, self.ad, self.mem, self.spike)
        self.integral(inputs)
        self.calc_spike()
        return self.spike, self.mem
class LIAFNode(BaseNode):
    """
    Leaky Integrate and Analog Fire (LIAF).
    Reference: https://ieeexplore.ieee.org/abstract/document/9429228

    Same dynamics as LIF, but the value passed forward is an analog
    activation of the membrane potential; the threshold/reset update still
    follows the spiking rule.

    :param act_fun: forward activation [ReLU, SeLU, LeakyReLU]; a string is
        resolved to the corresponding ``nn`` module
    :param threshold_related: threshold-dependent mode — if True,
        ``spike = act_fun(mem - threshold)``
    :note: BaseNode returns ``self.spike``; here ``self.spike`` is analog.
    NOTE(review): the ``spike_act=BackEIGateGrad()`` default is a single
    shared module instance across all LIAFNode instances — confirm intended.
    """
    def __init__(self, spike_act=BackEIGateGrad(), act_fun="SELU", threshold=0.5, tau=2., threshold_related=True, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        if isinstance(act_fun, str):
            # Resolve e.g. "SELU" -> nn.SELU().
            act_fun = eval("nn." + act_fun + "()")
        self.tau = tau
        self.act_fun = act_fun
        self.spike_act = spike_act
        self.threshold_related = threshold_related

    def integral(self, inputs):
        # Leaky integration towards the input.
        self.mem = self.mem + (inputs - self.mem) / self.tau

    def calc_spike(self):
        if self.threshold_related:
            spike_tmp = self.act_fun(self.mem - self.threshold)
        else:
            spike_tmp = self.act_fun(self.mem)
        # Binary spike drives the membrane reset...
        self.spike = self.spike_act(self.mem - self.threshold)
        self.mem = self.mem * (1 - self.spike)
        # ...but the analog activation is what gets forwarded.
        self.spike = spike_tmp
class OnlineLIFNode(BaseNode):
    """
    Online-update Leaky Integrate-and-Fire.

    Same dynamics as LIF, but the temporal state is detached from the
    autograd graph at every step, enabling online weight updates; memory use
    is constant with respect to the number of simulation steps.

    Usage notes (from the original authors):
      1. run the ``step`` forward passes in the training loop rather than
         inside the model zoo;
      2. insert a Replace function between Conv and OnlineLIFNode layers so
         temporal forwarding is detached while spatial gradients are kept;
      3. do not use BN; use weight standardization instead.
    Unlike OTTT, all temporal information is discarded here, following
    https://arxiv.org/abs/2302.14311. To keep it, ``self.rate_tracking``
    must be updated — see https://github.com/pkuxmq/OTTT-SNN.
    """
    def __init__(self, threshold=0.5, tau=2., act_fun=QGateGrad, init=False, *args, **kwargs):
        super().__init__(threshold, *args, **kwargs)
        self.tau = tau
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.act_fun = act_fun(alpha=2., requires_grad=False)
        self.rate_tracking = None
        # NOTE(review): the ``init`` argument is ignored — the flag always
        # starts True so the membrane is (re)initialized on the first forward.
        self.init = True

    def integral(self, inputs):
        if self.init is True:
            # Lazy init: the membrane shape follows the first input.
            self.mem = torch.zeros_like(inputs)
            self.init = False
        # Detach the carried state: no gradient flows across time steps.
        self.mem = self.mem.detach() + (inputs - self.mem.detach()) / self.tau

    def calc_spike(self):
        self.spike = self.act_fun(self.mem - self.threshold)
        self.mem = self.mem * (1 - self.spike.detach())
        with torch.no_grad():
            # BUGFIX: identity comparison with None (was ``== None``).
            if self.rate_tracking is None:
                self.rate_tracking = self.spike.clone().detach()
        # Concatenate the (frozen) rate estimate for the downstream layer.
        self.spike = torch.cat((self.spike, self.rate_tracking), dim=0)
class AdaptiveNode(LIFNode):
    """
    LIF neuron preceded by a learnable temporal encoder: the step dimension
    of the input is mixed by a small network (linear / MLP / SE-style
    attention / conv-style MLP) before the standard LIF dynamics run.

    :param threshold: firing threshold
    :param act_fun: surrogate gradient function (class, or its name as str)
    :param step: number of simulation steps (the encoder's feature size)
    :param spike_output: NOTE(review): currently unused — confirm intended
    kwargs:
        n_encode_type: one of 'linear', 'mlp', 'att', 'conv' (default 'linear')
    """
    def __init__(self, threshold=1., act_fun=QGateGrad, step=10, spike_output=True, *args, **kwargs):
        super().__init__(threshold=threshold, step=step, **kwargs)
        self.n_encode_type = kwargs['n_encode_type'] if 'n_encode_type' in kwargs else 'linear'
        if isinstance(act_fun, str):
            act_fun = eval(act_fun)
        self.act_fun = act_fun(alpha=2., requires_grad=False)
        # self.act_fun = BinaryActivation()
        print(self.n_encode_type)
        if self.n_encode_type == 'linear':
            self.encoder = nn.Sequential(
                CustomLinear(self.step, self.step)
            )
        elif self.n_encode_type == 'mlp':
            # Direct
            self.encoder = nn.Sequential(
                CustomLinear(self.step, self.step),
                nn.ReLU(),
                CustomLinear(self.step, self.step),
                nn.ReLU(),
                CustomLinear(self.step, self.step),
                nn.ReLU(),
                CustomLinear(self.step, self.step),
            )
        elif self.n_encode_type == 'att':
            # -> SE block
            self.encoder = nn.Sequential(
                nn.Linear(self.step, self.step),
                nn.ReLU(),
                nn.Linear(self.step, self.step),
                nn.ReLU(),
                nn.Linear(self.step, self.step),
                nn.Sigmoid()
            )
        elif self.n_encode_type == 'conv':
            self.encoder = nn.Sequential(
                nn.Linear(self.step, self.step),
                nn.ReLU(),
                nn.Linear(self.step, self.step),
            )
            # self.init_weight()
        else:
            raise NotImplementedError('Unrecognizable categories {}.'.format(self.n_encode_type))
        self.saved_mem = 0.

    def init_weight(self):
        # Initialize Conv1d encoders to an identity-like kernel (center tap).
        for mod in self.encoder.modules():
            if isinstance(mod, nn.Conv1d):
                mod.weight.data[:, :, 4] = 1. / mod.weight.shape[0]
                mod.weight.data[:, :, [0, 1, 2, 3, 5, 6, 7, 8]] = 0.
                mod.bias.data[:] = 0.

    def forward(self, inputs):  # (t b) c w h
        # Move the step dimension last so the encoder mixes across time.
        if self.n_encode_type != 'conv':
            x = rearrange(inputs, '(t b) ... -> b ... t', t=self.step)
        else:
            c, w, h = inputs.shape[1:]
            x = rearrange(inputs, '(t b) c w h -> (b c w h) 1 t', t=self.step)
        if self.n_encode_type != 'att':
            x = self.encoder(x)  # Direct
        else:
            x = x * self.encoder(x)  # SE Block
        # Restore the (t b) layout expected by LIFNode.forward.
        if self.n_encode_type != 'conv':
            x = rearrange(x, 'b ... t -> (t b) ...')
        else:
            x = rearrange(x, '(b c w h) 1 t -> (t b) c w h', c=c, w=w, h=h)
        # self.spike = self.act_fun(x - 0.5)
        # # print(self.spike.mean())
        # # print(self.requires_fp)
        # if self.requires_fp:
        #     spike = rearrange(self.spike, '(t b) c w h -> t b c w h', t=self.step)
        #     for t in range(self.step):
        #         # print(t, float(spike[t].mean()), float(spike[t].std()))
        #         self.feature_map.append(spike[t])
        # self.saved_mem = x
        # return self.spike
        return super().forward(x)

    # def get_thres(self):
    #     mem_relu = F.relu(self.mem.detach())
    #     return mem_relu[mem_relu > 0.].median()

    def n_reset(self):
        super().n_reset()
        self.saved_mem = 0.
================================================
FILE: braincog/base/strategy/LateralInhibition.py
================================================
import warnings
import torch
from torch import nn
import torch.nn.functional as F
class LateralInhibition(nn.Module):
    """
    Lateral inhibition: spiking neurons suppress the other neurons of the
    same layer by acting directly on the node's membrane potential.

    :param node: neuron node whose ``mem`` attribute is modified in place
    :param inh: inhibition strength
    :param mode: ``"constant"``, ``"max"`` or ``"threshold"``; any other
        value leaves the membrane untouched
    """

    def __init__(self, node, inh, mode="constant"):
        super().__init__()
        self.inh = inh
        self.node = node
        self.mode = mode

    def forward(self, x: torch.Tensor, xori=None):
        # x: [N, C, W, H]. Returns x unchanged; side effect on self.node.mem.
        gap = x.max(1, True)[0] - x
        if self.mode == "constant":
            penalty = self.inh * gap
        elif self.mode == "max":
            # Scale by the (detached) per-sample maximum of the raw input.
            penalty = self.inh * xori.max(1, True)[0].detach() * gap
        elif self.mode == "threshold":
            penalty = self.inh * self.node.threshold * gap
        else:
            penalty = None
        if penalty is not None:
            self.node.mem = self.node.mem - penalty
        return x
================================================
FILE: braincog/base/strategy/__init__.py
================================================
__all__ = ['surrogate', 'LateralInhibition']
from . import (
surrogate,
LateralInhibition
)
================================================
FILE: braincog/base/strategy/surrogate.py
================================================
import math
import torch
from torch import nn
from torch.nn import functional as F
def heaviside(x):
    """Elementwise unit step: 1 where ``x >= 0``, else 0, in ``x``'s dtype."""
    return torch.ge(x, 0.).to(x.dtype)
class SurrogateFunctionBase(nn.Module):
    """
    Base class for surrogate gradient functions.

    :param alpha: shape parameter handed to the concrete surrogate
    :param requires_grad: whether ``alpha`` itself receives gradients,
        default ``True``
    """

    def __init__(self, alpha, requires_grad=True):
        super().__init__()
        alpha_tensor = torch.tensor(alpha, dtype=torch.float)
        self.alpha = nn.Parameter(alpha_tensor, requires_grad=requires_grad)

    @staticmethod
    def act_fun(x, alpha):
        """
        :param x: membrane-potential input
        :param alpha: shape parameter of the surrogate (may be ``None``)
        :return: spikes with values in ``[0, 1]``
        """
        raise NotImplementedError

    def forward(self, x):
        """Apply the concrete surrogate to the membrane input ``x``."""
        return self.act_fun(x, self.alpha)
'''
sigmoid surrogate function.
'''
class sigmoid(torch.autograd.Function):
    """
    Spike activation using sigmoid as the surrogate gradient.

    Forward (primitive) function:

    .. math::
            g(x) = \\mathrm{sigmoid}(\\alpha x) = \\frac{1}{1+e^{-\\alpha x}}

    Backward (surrogate) gradient:

    .. math::
            g'(x) = \\alpha * (1 - \\mathrm{sigmoid} (\\alpha x)) \\mathrm{sigmoid} (\\alpha x)
    """
    @staticmethod
    def forward(ctx, x, alpha):
        if x.requires_grad:
            # Save state only when a backward pass can happen.
            ctx.save_for_backward(x)
            ctx.alpha = alpha
        return heaviside(x)

    @staticmethod
    def backward(ctx, grad_output):
        grad_x = None
        if ctx.needs_input_grad[0]:
            s_x = torch.sigmoid(ctx.alpha * ctx.saved_tensors[0])
            # d/dx sigmoid(alpha*x) = alpha * s * (1 - s).
            grad_x = grad_output * s_x * (1 - s_x) * ctx.alpha
        return grad_x, None
class SigmoidGrad(SurrogateFunctionBase):
    """Spike activation whose backward pass is the sigmoid derivative."""

    def __init__(self, alpha=1., requires_grad=False):
        super().__init__(alpha, requires_grad)

    @staticmethod
    def act_fun(x, alpha):
        # Delegate to the sigmoid autograd.Function.
        return sigmoid.apply(x, alpha)
'''
atan surrogate function.
'''
class atan(torch.autograd.Function):
    """
    Spike activation using arctan as the surrogate gradient.

    Forward (primitive) function:

    .. math::
            g(x) = \\frac{1}{\\pi} \\arctan(\\frac{\\pi}{2}\\alpha x) + \\frac{1}{2}

    Backward (surrogate) gradient:

    .. math::
            g'(x) = \\frac{\\alpha}{2(1 + (\\frac{\\pi}{2}\\alpha x)^2)}
    """
    @staticmethod
    def forward(ctx, inputs, alpha):
        # alpha must be a tensor here (save_for_backward requires tensors);
        # AtanGrad passes its nn.Parameter.
        ctx.save_for_backward(inputs, alpha)
        return inputs.gt(0.).float()

    @staticmethod
    def backward(ctx, grad_output):
        grad_x = None
        grad_alpha = None
        # Common factor of both gradients.
        shared_c = grad_output / \
            (1 + (ctx.saved_tensors[1] * math.pi /
                  2 * ctx.saved_tensors[0]).square())
        if ctx.needs_input_grad[0]:
            grad_x = ctx.saved_tensors[1] / 2 * shared_c
        if ctx.needs_input_grad[1]:
            # Alpha is a scalar parameter: reduce over all elements.
            grad_alpha = (ctx.saved_tensors[0] / 2 * shared_c).sum()
        return grad_x, grad_alpha
class AtanGrad(SurrogateFunctionBase):
    """Spike activation whose backward pass is the arctan derivative."""

    def __init__(self, alpha=2., requires_grad=True):
        super().__init__(alpha, requires_grad)

    @staticmethod
    def act_fun(x, alpha):
        # Delegate to the atan autograd.Function.
        return atan.apply(x, alpha)
'''
gate surrogate function.
'''
class gate(torch.autograd.Function):
    """
    Spike activation using a gate surrogate gradient.

    Forward (primitive) function:

    .. math::
            g(x) = \\mathrm{NonzeroSign}(x) \\log (|\\alpha x| + 1)

    Backward (surrogate) gradient:

    .. math::
            g'(x) = \\frac{\\alpha}{1 + |\\alpha x|} = \\frac{1}{\\frac{1}{\\alpha} + |x|}
    """
    @staticmethod
    def forward(ctx, x, alpha):
        if x.requires_grad:
            # NOTE(review): the saved gradient is a rectangular window
            # 1[|x| < 1/alpha], not the log-form derivative quoted above.
            grad_x = torch.where(x.abs() < 1. / alpha, torch.ones_like(x), torch.zeros_like(x))
            ctx.save_for_backward(grad_x)
        return x.gt(0).float()

    @staticmethod
    def backward(ctx, grad_output):
        grad_x = None
        if ctx.needs_input_grad[0]:
            grad_x = grad_output * ctx.saved_tensors[0]
        return grad_x, None
class GateGrad(SurrogateFunctionBase):
    """Spike activation using the rectangular gate surrogate gradient."""

    def __init__(self, alpha=2., requires_grad=False):
        super().__init__(alpha, requires_grad)

    @staticmethod
    def act_fun(x, alpha):
        # Delegate to the gate autograd.Function.
        return gate.apply(x, alpha)
'''
quadratic_gate surrogate function.
'''
class quadratic_gate(torch.autograd.Function):
    """
    Spike activation using a quadratic gate as the surrogate gradient.

    Forward (primitive) function:

    .. math::
        g(x) =
        \\begin{cases}
        0, & x < -\\frac{1}{\\alpha} \\\\
        -\\frac{1}{2}\\alpha^2|x|x + \\alpha x + \\frac{1}{2}, & |x| \\leq \\frac{1}{\\alpha} \\\\
        1, & x > \\frac{1}{\\alpha} \\\\
        \\end{cases}

    Backward (surrogate) gradient:

    .. math::
        g'(x) =
        \\begin{cases}
        0, & |x| > \\frac{1}{\\alpha} \\\\
        -\\alpha^2|x|+\\alpha, & |x| \\leq \\frac{1}{\\alpha}
        \\end{cases}
    """
    @staticmethod
    def forward(ctx, x, alpha):
        if x.requires_grad:
            # Precompute the triangular gradient and zero it outside |x|<=1/alpha.
            mask_zero = (x.abs() > 1 / alpha)
            grad_x = -alpha * alpha * x.abs() + alpha
            grad_x.masked_fill_(mask_zero, 0)
            ctx.save_for_backward(grad_x)
        return x.gt(0.).float()

    @staticmethod
    def backward(ctx, grad_output):
        grad_x = None
        if ctx.needs_input_grad[0]:
            grad_x = grad_output * ctx.saved_tensors[0]
        return grad_x, None
class QGateGrad(SurrogateFunctionBase):
    """Spike activation using the quadratic-gate surrogate gradient."""

    def __init__(self, alpha=2., requires_grad=False):
        super().__init__(alpha, requires_grad)

    @staticmethod
    def act_fun(x, alpha):
        # Delegate to the quadratic_gate autograd.Function.
        return quadratic_gate.apply(x, alpha)
class relu_like(torch.autograd.Function):
    """
    Heaviside forward with a ReLU-like surrogate gradient: grad wrt x is
    ``alpha`` where ``x > 0``; grad wrt alpha is ``sum(grad * relu(x))``.
    """
    @staticmethod
    def forward(ctx, x, alpha):
        if x.requires_grad:
            ctx.save_for_backward(x, alpha)
        return heaviside(x)

    @staticmethod
    def backward(ctx, grad_output):
        grad_x, grad_alpha = None, None
        x, alpha = ctx.saved_tensors
        if ctx.needs_input_grad[0]:
            # Pass gradient only where the input was positive, scaled by alpha.
            grad_x = grad_output * x.gt(0.).float() * alpha
        if ctx.needs_input_grad[1]:
            grad_alpha = (grad_output * F.relu(x)).sum()
        return grad_x, grad_alpha
class RoundGrad(nn.Module):
    """
    Ceil-based quantization with a straight-through gradient: the forward
    value is ``ceil(hardtanh(x))``, while the backward pass sees the identity
    on the clamped input.
    """

    def __init__(self, **kwargs):
        super().__init__()
        self.act = nn.Hardtanh(-.5, 4.5)

    def forward(self, x):
        clamped = self.act(x)
        # ceil() has zero gradient everywhere; (clamped - clamped.detach())
        # re-attaches the identity gradient without changing the value.
        return clamped.ceil() + clamped - clamped.detach()
class ReLUGrad(SurrogateFunctionBase):
    """
    Spike activation with a ReLU surrogate gradient; mainly used to test an
    ANN with the same architecture.
    """

    def __init__(self, alpha=2., requires_grad=False):
        super().__init__(alpha, requires_grad)

    @staticmethod
    def act_fun(x, alpha):
        # Delegate to the relu_like autograd.Function.
        return relu_like.apply(x, alpha)
'''
Straight-Through (ST) Estimator
'''
class straight_through_estimator(torch.autograd.Function):
    """
    Straight-Through Estimator surrogate: Heaviside forward, identity
    backward. http://arxiv.org/abs/1308.3432
    """
    @staticmethod
    def forward(ctx, inputs):
        outputs = heaviside(inputs)
        # NOTE(review): the saved outputs are never read in backward.
        ctx.save_for_backward(outputs)
        return outputs

    @staticmethod
    def backward(ctx, grad_output):
        grad_x = None
        if ctx.needs_input_grad[0]:
            # Identity: gradients pass through the binarization unchanged.
            grad_x = grad_output
        return grad_x
class stdp(torch.autograd.Function):
    """
    Spike function used with STDP learning: Heaviside-like forward
    (``x > 0``); backward scales the incoming gradient by the binary outputs
    saved in forward.
    """
    @staticmethod
    def forward(ctx, inputs):
        outputs = inputs.gt(0.).float()
        ctx.save_for_backward(outputs)
        return outputs

    @staticmethod
    def backward(ctx, grad_output):
        # ``inputs`` here is actually the saved binary OUTPUT of forward.
        inputs, = ctx.saved_tensors
        return inputs * grad_output
class STDPGrad(SurrogateFunctionBase):
    """Spike activation for STDP learning; ``alpha`` is accepted but unused."""

    def __init__(self, alpha=2., requires_grad=False):
        super().__init__(alpha, requires_grad)

    @staticmethod
    def act_fun(x, alpha):
        # Delegate to the stdp autograd.Function (alpha intentionally unused).
        return stdp.apply(x)
class backeigate(torch.autograd.Function):
    """
    Heaviside forward with a rectangular surrogate gradient: the backward
    pass lets gradients through only where ``|input| < 0.5``.
    """

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return input.gt(0.).float()

    @staticmethod
    def backward(ctx, grad_output):
        saved, = ctx.saved_tensors
        window = saved.abs().lt(0.5).float()
        return grad_output.clone() * window
class BackEIGateGrad(SurrogateFunctionBase):
    """Spike activation with a rectangular-window surrogate gradient."""

    def __init__(self, alpha=2., requires_grad=False):
        super().__init__(alpha, requires_grad)

    @staticmethod
    def act_fun(x, alpha):
        # Delegate to the backeigate autograd.Function (alpha unused).
        return backeigate.apply(x)
class ei(torch.autograd.Function):
    """
    Signed (excitatory/inhibitory) activation: forward is ``sign(input)``;
    backward uses the rectangular window ``|input| < 0.5``.
    """

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return torch.sign(input).float()

    @staticmethod
    def backward(ctx, grad_output):
        saved, = ctx.saved_tensors
        return grad_output.clone() * saved.abs().lt(0.5).float()
class EIGrad(SurrogateFunctionBase):
    """Signed spike activation (+1/0/-1) with a rectangular surrogate gradient."""

    def __init__(self, alpha=2., requires_grad=False):
        super().__init__(alpha, requires_grad)

    @staticmethod
    def act_fun(x, alpha):
        # Delegate to the ei autograd.Function (alpha unused).
        return ei.apply(x)
================================================
FILE: braincog/base/utils/__init__.py
================================================
from .criterions import UnilateralMse, MixLoss
from .visualization import plot_tsne, plot_tsne_3d, plot_confusion_matrix
from torch.autograd import Variable
import torch
__all__ = [
'UnilateralMse', 'MixLoss',
'plot_tsne', 'plot_tsne_3d', 'plot_confusion_matrix', 'drop_path'
]
def drop_path(x, drop_prob):
    """
    Drop entire samples of ``x`` (along the first dimension) with
    probability ``drop_prob`` and rescale survivors by ``1 / keep_prob`` so
    the expected activation is unchanged. ``x`` is modified in place.

    :param x: tensor of shape (N, C, H, W)
    :param drop_prob: probability of zeroing each sample
    :return: the (possibly modified) input tensor
    """
    if drop_prob > 0.:
        keep_prob = 1. - drop_prob
        # BUGFIX: build the mask on x's own device instead of the deprecated
        # ``Variable(torch.cuda.FloatTensor(...))``, which crashed on CPU.
        mask = torch.empty(x.size(0), 1, 1, 1, dtype=x.dtype,
                           device=x.device).bernoulli_(keep_prob)
        x.div_(keep_prob)
        x.mul_(mask)
    return x
================================================
FILE: braincog/base/utils/criterions.py
================================================
import numpy as np
import torch
import torch.nn.functional as F
class UnilateralMse(torch.nn.Module):
    """
    One-sided MSE loss used to drive the output layer's firing rate up to
    ``thresh``: outputs are clipped at ``thresh`` before the MSE, so values
    already at or above the target rate incur no extra penalty.

    :param thresh: desired output firing rate of the readout layer
    """

    def __init__(self, thresh=1.):
        super(UnilateralMse, self).__init__()
        self.thresh = thresh
        self.loss = torch.nn.MSELoss()

    def forward(self, x, target):
        # x = nn.functional.softmax(x, dim=1)
        # BUGFIX: torch.clip is not in-place — the clipped result was
        # discarded, so the upper bound never applied. Keep the return value.
        x = torch.clip(x, max=self.thresh)
        if x.shape == target.shape:
            return self.loss(x, target)
        # Class-index targets: compare against a one-hot scaled to thresh.
        return self.loss(x, torch.zeros_like(x).scatter_(1, target.view(-1, 1), self.thresh))
class MixLoss(torch.nn.Module):
    """
    Mixes an arbitrary loss with the UnilateralMse loss:
    ``0.1 * ce_loss(x, target) + UnilateralMse(1.)(x, target)``.

    :param ce_loss: any loss callable taking ``(x, target)``
    """

    def __init__(self, ce_loss):
        super(MixLoss, self).__init__()
        self.ce = ce_loss
        self.mse = UnilateralMse(1.)

    def forward(self, x, target):
        ce_term = self.ce(x, target)
        mse_term = self.mse(x, target)
        return 0.1 * ce_term + mse_term
class TetLoss(torch.nn.Module):
    """
    Temporal Efficient Training style loss: applies ``loss_fn`` to the
    logits of every time step and returns the mean over steps.

    :param loss_fn: loss callable applied per step as ``loss_fn(logit, target)``
    """

    def __init__(self, loss_fn):
        super(TetLoss, self).__init__()
        self.loss_fn = loss_fn

    def forward(self, x, target):
        # x: (T, ...) — iterate over the leading (time) dimension.
        total = sum(self.loss_fn(step_logit, target) for step_logit in x)
        return total / x.shape[0]
class OnehotMse(torch.nn.Module):
    """
    MSE loss against a one-hot encoding of the class target; used for SNNs
    with a voting output layer.

    :param num_class: number of classes for the one-hot encoding
    """

    def __init__(self, num_class):
        super(OnehotMse, self).__init__()
        self.num_class = num_class
        self.loss_fn = torch.nn.MSELoss()

    def forward(self, x, target):
        onehot = F.one_hot(target.to(torch.int64), self.num_class).float()
        return self.loss_fn(x, onehot)
================================================
FILE: braincog/base/utils/visualization.py
================================================
# encoding: utf-8
# Author : Floyed<Floyed_Shen@outlook.com>
# Datetime : 2022/7/1 11:10
# User : Floyed
# Product : PyCharm
# Project : braincog
# File : visualization.py
# explain : add t-SNE
import os
import numpy as np
import sklearn
from sklearn.manifold import TSNE
from sklearn.metrics import confusion_matrix
import torch
import torch.nn.functional as F
from einops import rearrange
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
import seaborn as sns
# Random state.
RS = 20150101
def spike_rate_vis_1d(data, output_dir=''):
    """
    Show a (channel, time) heatmap of 1-D spike data.

    :param data: array/tensor of shape (t, c)
    :param output_dir: NOTE(review): unused — the figure is only shown,
        never saved
    """
    assert len(data.shape) == 2, 'Shape should be (t, c).'
    # Transpose so channels become rows and time becomes columns.
    data = rearrange(data, 'i j -> j i')
    if isinstance(data, torch.Tensor):
        data = data.to('cpu').numpy()
    plt.figure(figsize=(8, 8))
    sns.heatmap(data, annot=None, cmap='YlGnBu')
    # plt.ylim(0, _max + 1)
    # NOTE(review): these axis labels look copy-pasted from the confusion
    # matrix plot — for spike rates, x is time and y is channel. Confirm.
    plt.xlabel('Predicted labels')
    plt.ylabel('True labels')
    plt.show()
def spike_rate_vis(data, output_dir=''):
    """
    Show a heatmap of spike rates averaged over the time dimension.

    :param data: array/tensor of shape (t, r, c)
    :param output_dir: NOTE(review): unused — the figure is only shown,
        never saved
    """
    assert len(data.shape) == 3, 'Shape should be (t, r, c).'
    # Average over time; torch accepts the ``axis`` alias for ``dim``.
    data = data.mean(axis=0)
    if isinstance(data, torch.Tensor):
        data = data.to('cpu').numpy()
    plt.figure(figsize=(8, 8))
    sns.heatmap(data, annot=None, cmap='YlGnBu')
    # plt.ylim(0, _max + 1)
    # NOTE(review): these axis labels look copy-pasted from the confusion
    # matrix plot — confirm they match this visualization.
    plt.xlabel('Predicted labels')
    plt.ylabel('True labels')
    plt.show()
def plot_mem_distribution(data,
                          output_dir='',
                          legend='',
                          xlabel='Membrane Potential',
                          ylabel='Density',
                          **kwargs):
    """
    Plot a density histogram of membrane-potential values, discarding
    outliers beyond three standard deviations, and optionally save it.

    :param data: tensor/array of membrane potentials (flattened)
    :param output_dir: if non-empty, path the figure is saved to
    :param legend: currently unused (kept for interface compatibility)
    :param xlabel: x-axis label
    :param ylabel: y-axis label
    :param kwargs: forwarded to ``plt.hist``
    """
    # print(type(data), len(data))
    if isinstance(data, torch.Tensor):
        data = data.reshape(-1).to('cpu').numpy()
    mean = data.mean()
    std = data.std()
    # Trim values outside mean ± 3*std before plotting.
    idx = np.argwhere(data < mean - 3 * std)
    data = np.delete(data, idx)
    idx = np.argwhere(data > mean + 3 * std)
    data = np.delete(data, idx)
    sns.set_style('darkgrid')
    # sns.set_palette('deep', desat=.6)
    sns.set_context("notebook", font_scale=1.5,
                    rc={"lines.linewidth": 2.5})
    # fig = plt.figure(figsize=(8, 8))
    # ax = fig.add_subplot(111, aspect='equal')
    # sns.distplot(data, bins=int(np.sqrt(data.shape[0])),
    #              hist=True, kde=False, hist_kws={'histtype': 'stepfilled'}, **kwargs)
    # print('hist begin')
    print(len(data))
    n, bins, patches = plt.hist(data,
                                density=True,
                                histtype='stepfilled',
                                alpha=0.618,
                                bins=int(np.sqrt(data.shape[0])),
                                **kwargs)
    # print('hist finished')
    # sns.kdeplot(data, color='#5294c3')
    # print('kde finished')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # if legend != '':
    #     plt.legend(legend)
    # ax.axis('tight')
    if output_dir != '':
        plt.savefig(output_dir, bbox_inches='tight')
        print('{} saved'.format(output_dir))
    # plt.show()
def plot_tsne(x, colors, output_dir="", num_classes=None):
    """
    Plot a 2-D t-SNE embedding of features, colored by label.

    :param x: input feature map / spikes, shape (N, D)
    :param colors: predicted labels used to color each point
    :param output_dir: output path (with file name and suffix); if empty,
        the figure is not saved (previously savefig('') raised an error)
    :param num_classes: number of classes; inferred from labels if None
    :return: None
    """
    if isinstance(x, torch.Tensor):
        x = x.to('cpu').numpy()
    if isinstance(colors, torch.Tensor):
        colors = colors.to('cpu').numpy()
    if num_classes is None:
        num_classes = int(colors.max()) + 1
    x = TSNE(random_state=RS, n_components=2).fit_transform(x)
    sns.set_style('darkgrid')
    sns.set_palette('muted')
    sns.set_context("notebook", font_scale=1.5,
                    rc={"lines.linewidth": 2.5})
    palette = np.array(sns.color_palette("hls", num_classes))
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111, aspect='equal')
    # np.int was removed in NumPy 1.24; the builtin int works as a dtype.
    sc = ax.scatter(x[:, 0], x[:, 1], lw=0, s=25,
                    c=palette[colors.astype(int)])
    ax.axis('tight')
    if output_dir != '':
        plt.savefig(output_dir, facecolor=fig.get_facecolor(), bbox_inches='tight')
def plot_tsne_3d(x, colors, output_dir="", num_classes=None):
    """
    Plot a 3-D t-SNE embedding of features and save it to the output path.

    :param x: input feature map / spikes, shape (N, D)
    :param colors: predicted labels used to color each point
    :param output_dir: output path (with file name and suffix); if empty,
        the figure is not saved (previously savefig('') raised an error)
    :param num_classes: number of classes; inferred from labels if None
    :return: None
    """
    if isinstance(x, torch.Tensor):
        x = x.to('cpu').numpy()
    if isinstance(colors, torch.Tensor):
        colors = colors.to('cpu').numpy()
    if num_classes is None:
        num_classes = int(colors.max()) + 1
    x = TSNE(random_state=RS, n_components=3, perplexity=30).fit_transform(x)
    sns.set_palette('muted')
    sns.set_context("notebook", font_scale=1.5,
                    rc={"lines.linewidth": 2.5})
    fig = plt.figure(figsize=(8, 8))
    palette = np.array(sns.color_palette("hls", num_classes))
    ax = fig.add_subplot(111, projection='3d')
    # np.int was removed in NumPy 1.24; the builtin int works as a dtype.
    sc = ax.scatter(x[:, 0], x[:, 1], x[:, 2], lw=0, s=20, alpha=0.8,
                    c=palette[colors.astype(int)])
    ax.axis('tight')
    if output_dir != '':
        plt.savefig(output_dir, facecolor=fig.get_facecolor(), bbox_inches='tight')
def plot_confusion_matrix(logits, labels, output_dir):
    """
    Draw a row-normalized confusion matrix and save it to ``output_dir``.

    :param logits: predicted scores, shape (N, num_classes)
    :param labels: ground-truth labels, shape (N,)
    :param output_dir: output path, including file name and suffix
    :return: None
    """
    sns.set_style('darkgrid')
    sns.set_palette('Blues_r')
    sns.set_context("notebook", font_scale=1.,
                    rc={"lines.linewidth": 2.})
    preds = logits.argmax(dim=1).cpu()
    truth = labels.cpu()
    _max = truth.max()
    # Per-cell numbers are only readable with few classes.
    annot = not _max > 10
    matrix = confusion_matrix(truth, preds)
    # Normalize each row so cells show the fraction of that true class,
    # rounded to two decimals for display.
    normalized = np.around(
        matrix.astype('float') / matrix.sum(axis=1)[:, np.newaxis],
        decimals=2)
    plt.figure(figsize=(8, 8))
    sns.heatmap(normalized, annot=annot, cmap='Blues')
    plt.ylim(0, _max + 1)
    plt.xlabel('Predicted labels')
    plt.ylabel('True labels')
    plt.savefig(output_dir, bbox_inches='tight')
if __name__ == '__main__':
    # Manual smoke tests for the visualization helpers above; uncomment
    # the relevant section to exercise a specific plot.
    # Test for T-SNE
    # x = torch.randn((100, 100))
    # y = torch.randint(low=0, high=10, size=[100])
    # plot_tsne_3d(x, y, output_dir='./t-sne.eps')
    # Test for confusion matrix
    # x = torch.rand(5012, 100)
    # y = torch.randint(0, 100, (5012,))
    # plot_confusion_matrix(x, y, '')
    # Test for Mem Distribution
    x = torch.randn(100000)
    plot_mem_distribution(x, legend=['test'])
================================================
FILE: braincog/datasets/CUB2002011.py
================================================
import os
import pandas as pd
from torchvision.datasets import VisionDataset
from torchvision.datasets.folder import default_loader
from torchvision.datasets.utils import download_file_from_google_drive
class CUB2002011(VisionDataset):
    """`CUB-200-2011 <http://www.vision.caltech.edu/visipedia/CUB-200-2011.html>`_ Dataset.
    Args:
        root (string): Root directory of the dataset.
        train (bool, optional): If True, creates dataset from training set, otherwise
            creates from test set.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    base_folder = 'CUB_200_2011/images'
    # url = 'http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz'
    file_id = '1hbzc_P1FuxMkcabkgn9ZKinBwW683j45'  # Google Drive id of the archive
    filename = 'CUB_200_2011.tgz'
    tgz_md5 = '97eceeb196236b17998738112f37df78'

    def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
        super(CUB2002011, self).__init__(root, transform=transform, target_transform=target_transform)
        self.loader = default_loader
        self.train = train
        if download:
            self._download()
        # _check_integrity also loads the split metadata (self.data) as a side effect.
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted. You can use download=True to download it')

    def _load_metadata(self):
        """Read the metadata text files and build ``self.data`` for the chosen split."""
        images = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'images.txt'), sep=' ',
                             names=['img_id', 'filepath'])
        image_class_labels = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'image_class_labels.txt'),
                                         sep=' ', names=['img_id', 'target'])
        train_test_split = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'train_test_split.txt'),
                                       sep=' ', names=['img_id', 'is_training_img'])
        # Join image paths, labels and the official train/test split on img_id.
        data = images.merge(image_class_labels, on='img_id')
        self.data = data.merge(train_test_split, on='img_id')
        class_names = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'classes.txt'),
                                  sep=' ', names=['class_name'], usecols=[1])
        self.class_names = class_names['class_name'].to_list()
        if self.train:
            self.data = self.data[self.data.is_training_img == 1]
        else:
            self.data = self.data[self.data.is_training_img == 0]

    def _check_integrity(self):
        """Return True when the metadata loads and every listed image file exists on disk."""
        try:
            self._load_metadata()
        except Exception:
            return False
        for index, row in self.data.iterrows():
            filepath = os.path.join(self.root, self.base_folder, row.filepath)
            if not os.path.isfile(filepath):
                print(filepath)
                return False
        return True

    def _download(self):
        """Download and extract the dataset archive unless it is already present and valid."""
        import tarfile
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_file_from_google_drive(self.file_id, self.root, self.filename, self.tgz_md5)
        # NOTE(review): extractall without a members filter is vulnerable to
        # path traversal for untrusted archives; the md5 check above mitigates
        # this for the official archive.
        with tarfile.open(os.path.join(self.root, self.filename), "r:gz") as tar:
            tar.extractall(path=self.root)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        sample = self.data.iloc[idx]
        path = os.path.join(self.root, self.base_folder, sample.filepath)
        target = sample.target - 1  # Targets start at 1 by default, so shift to 0
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
if __name__ == '__main__':
    # Smoke test: instantiate both splits from a local copy of the dataset.
    train_dataset = CUB2002011('./cub2011', train=True, download=False)
    test_dataset = CUB2002011('./cub2011', train=False, download=False)
================================================
FILE: braincog/datasets/ESimagenet/ES_imagenet.py
================================================
# -*- coding: utf-8 -*-
# Time : 2022/11/1 11:06
# Author : Regulus
# FileName: ES_imagenet.py
# Explain:
# Software: PyCharm
import numpy as np
import torch
import linecache
import torch.utils.data as data
class ESImagenet_Dataset(data.Dataset):
    """
    ES-ImageNet event dataset.

    Each sample is an ``.npz`` file holding positive ('pos') and negative
    ('neg') event lists of (x, y, t) triples, rasterized here into a dense
    tensor of shape (2, 8, 224, 224): polarity x time step x H x W.

    :param mode: 'train' reads ``train/`` + ``trainlabel.txt``; anything
        else reads ``val/`` + ``vallabel.txt``
    :param data_set_path: dataset root; must end with a path separator
    :param transform: optional transform applied to the rasterized tensor
    """

    def __init__(self, mode, data_set_path='/data/dvsimagenet/', transform=None):
        super().__init__()
        self.mode = mode
        self.filenames = []
        self.trainpath = data_set_path + 'train'
        self.testpath = data_set_path + 'val'
        self.traininfotxt = data_set_path + 'trainlabel.txt'
        self.testinfotxt = data_set_path + 'vallabel.txt'
        self.formats = '.npz'
        self.transform = transform
        if mode == 'train':
            self.path = self.trainpath
            infotxt = self.traininfotxt
        else:
            self.path = self.testpath
            infotxt = self.testinfotxt
        # Context manager closes the label file (the original leaked the handle).
        with open(infotxt, 'r') as infofile:
            for line in infofile:
                filename, classnum, a, b = line.split()
                realname, sub = filename.split('.')
                self.filenames.append(realname + self.formats)

    def __getitem__(self, index):
        # Label lines are 1-indexed in linecache.
        if self.mode == 'train':
            info = linecache.getline(self.traininfotxt, index + 1)
        else:
            info = linecache.getline(self.testinfotxt, index + 1)
        filename, classnum, a, b = info.split()
        realname, sub = filename.split('.')
        filename = self.path + r'/' + realname + self.formats
        classnum = int(classnum)
        a = int(a)
        b = int(b)
        # Load the archive once (the original loaded it twice) and cast the
        # event coordinates to integers: torch rejects float index arrays,
        # so the original float64 cast raised at indexing time.
        events = np.load(filename)
        datapos = events['pos'].astype(np.int64)
        dataneg = events['neg'].astype(np.int64)
        # Center the (a, b)-sized event frame inside the 256x256 canvas.
        dy = (254 - b) // 2
        dx = (254 - a) // 2
        frame = torch.zeros([2, 8, 256, 256])
        x = datapos[:, 0] + dx
        y = datapos[:, 1] + dy
        t = datapos[:, 2] - 1  # timestamps are 1-based in the files
        frame[0, t, x, y] = 1
        x = dataneg[:, 0] + dx
        y = dataneg[:, 1] + dy
        t = dataneg[:, 2] - 1
        frame[1, t, x, y] = 1
        # Crop the central 224x224 region (the original also applied an
        # identity permute(0, 1, 2, 3), dropped here).
        reshape = frame[:, :, 16:240, 16:240].contiguous()
        if self.transform is not None:
            reshape = self.transform(reshape)
        label = torch.tensor([classnum])
        return reshape, label

    def __len__(self):
        return len(self.filenames)
================================================
FILE: braincog/datasets/ESimagenet/__init__.py
================================================
# -*- coding: utf-8 -*-
# Time : 2022/11/1 11:05
# Author : Regulus
# FileName: __init__.py.py
# Explain:
# Software: PyCharm
"""
from: https://github.com/lyh983012/ES-imagenet-master
"""
__all__ = ['ES_imagenet', 'reconstructed_ES_imagenet']
from . import (
ES_imagenet,
reconstructed_ES_imagenet
)
================================================
FILE: braincog/datasets/ESimagenet/reconstructed_ES_imagenet.py
================================================
# -*- coding: utf-8 -*-
# Time : 2022/11/1 11:06
# Author : Regulus
# FileName: reconstructed_ES_imagenet.py
# Explain:
# Software: PyCharm
import numpy as np
import torch
import linecache
import torch.utils.data as data
from tqdm import tqdm
class ESImagenet2D_Dataset(data.Dataset):
    """
    Reconstructed (grayscale) ES-ImageNet dataset.

    Events from each ``.npz`` sample are rasterized into a (2, 8, 256, 256)
    tensor (polarity x time x H x W) and then re-integrated along a fixed
    per-timestep offset trace into one grayscale frame of shape
    (1, 1, 224, 224).

    :param mode: 'train' reads ``train/`` + ``trainlabel.txt``; anything
        else reads ``val/`` + ``vallabel.txt``
    :param data_set_path: dataset root; must end with a path separator
    :param transform: accepted for interface compatibility; currently unused
        (matching the original, where its application was commented out)
    """

    def __init__(self, mode, data_set_path='/data/ESimagenet-0.18/', transform=None):
        super().__init__()
        self.mode = mode
        self.filenames = []
        self.trainpath = data_set_path + 'train'
        self.testpath = data_set_path + 'val'
        self.traininfotxt = data_set_path + 'trainlabel.txt'
        self.testinfotxt = data_set_path + 'vallabel.txt'
        self.formats = '.npz'
        self.transform = transform
        if mode == 'train':
            self.path = self.trainpath
            infotxt = self.traininfotxt
        else:
            self.path = self.testpath
            infotxt = self.testinfotxt
        # Read the label file once with a context manager (the original
        # opened it twice and never closed either handle).
        with open(infotxt, 'r') as infofile:
            self.infolist = infofile.readlines()
        for line in self.infolist:
            filename, classnum, a, b = line.split()
            realname, sub = filename.split('.')
            self.filenames.append(realname + self.formats)

    def __getitem__(self, index):
        info = self.infolist[index]
        filename, classnum, a, b = info.split()
        realname, sub = filename.split('.')
        filename = self.path + r'/' + realname + self.formats
        classnum = int(classnum)
        a = int(a)
        b = int(b)
        # Renamed local from 'data' (which shadowed the torch.utils.data
        # module import). Coordinates are cast to integers: torch rejects
        # float index arrays, so the original float64 cast raised at
        # indexing time.
        with open(filename, "rb") as f:
            archive = np.load(f)
            datapos = archive['pos'].astype(np.int64)
            dataneg = archive['neg'].astype(np.int64)
        # Per-timestep pixel offsets of the reconstruction trace.
        tracex = [0, 2, 1, 0, 2, 1, 1, 2]
        tracey = [2, 1, 0, 1, 2, 0, 1, 1]
        # Center the (a, b)-sized event frame inside the 256x256 canvas.
        dy = (254 - b) // 2
        dx = (254 - a) // 2
        frame = torch.zeros([2, 8, 256, 256])
        x = datapos[:, 0] + dx
        y = datapos[:, 1] + dy
        t = datapos[:, 2] - 1  # timestamps are 1-based in the files
        frame[0, t, x, y] += 1
        x = dataneg[:, 0] + dx
        y = dataneg[:, 1] + dy
        t = dataneg[:, 2] - 1
        frame[1, t, x, y] += 1
        sum_gary_data = torch.zeros([1, 1, 256, 256])
        reshape = frame[:, :, 16:240, 16:240]
        H = 224
        W = 224
        # Accumulate positive minus negative events, shifted by the trace.
        for t in range(8):
            dx = tracex[t]
            dy = tracey[t]
            sum_gary_data[0, 0, 2 - dx:2 - dx + H, 2 - dy:2 - dy + W] += reshape[0, t, :, :]
            sum_gary_data[0, 0, 2 - dx:2 - dx + H, 2 - dy:2 - dy + W] -= reshape[1, t, :, :]
        # Crop the final 224x224 grayscale frame.
        sum_gary_data = sum_gary_data[:, :, 1:225, 1:225]
        label = classnum
        return sum_gary_data, label

    def __len__(self):
        return len(self.filenames)
================================================
FILE: braincog/datasets/NOmniglot/NOmniglot.py
================================================
from torch.utils.data import Dataset
from braincog.datasets.NOmniglot.utils import *
class NOmniglot(Dataset):
    """
    N-Omniglot event dataset.

    On first use (``create=True``) raw recordings are converted into
    per-sample numpy event files and then into fixed-length frame tensors;
    afterwards the cached frame directory is reused directly.

    :param root: dataset root directory
    :param frames_num: number of frames each event stream is binned into
    :param train: use the 'background' split if True, else 'evaluation'
    :param data_type: frame integration mode passed to the converter
        (e.g. 'event' or 'frequency')
    :param transform: optional transform applied to each frame tensor
    :param target_transform: optional transform applied to each label
    :param use_npz: store/load compressed ``.npz`` instead of ``.npy``
    :param crop: crop frames to the central character region
    :param create: allow on-the-fly conversion when caches are missing
    :param thread_num: worker threads used for frame conversion
    """

    def __init__(self, root='data/', frames_num=12, train=True, data_type='event',
                 transform=None, target_transform=None, use_npz=False, crop=True, create=True, thread_num=16):
        super().__init__()
        self.crop = crop
        self.data_type = data_type
        self.use_npz = use_npz
        self.transform = transform
        self.target_transform = target_transform
        # Cache directories are keyed by conversion parameters so different
        # settings do not collide.
        events_npy_root = os.path.join(root, 'events_npy', 'background' if train else "evaluation")
        frames_root = os.path.join(root, f'fnum_{frames_num}_dtype_{data_type}_npz_{use_npz}',
                                   'background' if train else "evaluation")
        if not os.path.exists(frames_root) and create:
            if not os.path.exists(events_npy_root) and create:
                os.makedirs(events_npy_root)
                print('creating event data..')
                convert_aedat4_dir_to_events_dir(root, train)
            else:
                print(f'npy format events data root {events_npy_root}, already exists')
            os.makedirs(frames_root)
            print('creating frames data..')
            convert_events_dir_to_frames_dir(events_npy_root, frames_root, '.npy', frames_num, data_type,
                                             thread_num=thread_num, compress=use_npz)
        else:
            print(f'frames data root {frames_root} already exists.')
        # datadict maps class id -> list of sample file paths; flatten it
        # into (path, label) pairs for index-based access.
        self.datadict, self.num_classes = list_class_files(events_npy_root, frames_root, True, use_npz=use_npz)
        self.datalist = []
        for i in self.datadict:
            self.datalist.extend([(j, i) for j in self.datadict[i]])

    def __len__(self):
        return len(self.datalist)

    def __getitem__(self, index):
        image, label = self.datalist[index]
        image, label = self.readimage(image, label)
        return image, label

    def readimage(self, image, label):
        """Load one frame tensor from its file path and apply optional crop/transforms."""
        if self.use_npz:
            image = torch.tensor(np.load(image)['arr_0']).float()
        else:
            image = torch.tensor(np.load(image)).float()
        if self.crop:
            # Crop a 250x250 character region out of the full sensor frame.
            image = image[:, :, 4:254, 54:304]
        if self.transform is not None: image = self.transform(image)
        if self.target_transform is not None: label = self.target_transform(label)
        return image, label
================================================
FILE: braincog/datasets/NOmniglot/__init__.py
================================================
__all__ = ['NOmniglot', 'nomniglot_full', 'nomniglot_nw_ks','nomniglot_pair','utils']
from . import (
NOmniglot,
nomniglot_full,
nomniglot_nw_ks,
nomniglot_pair,
utils
)
================================================
FILE: braincog/datasets/NOmniglot/nomniglot_full.py
================================================
import torch
from torch.utils.data import Dataset, DataLoader
from braincog.datasets.NOmniglot.NOmniglot import NOmniglot
class NOmniglotfull(Dataset):
    '''
    Treat few-shot N-Omniglot as a plain classification problem.

    The original background (train) and evaluation (test) sets are
    concatenated — evaluation labels are shifted by 964 so they do not
    collide — and every run of 20 consecutive samples is split 15/5
    into train/test.
    '''

    def __init__(self, root='data/', train=True, frames_num=4, data_type='event',
                 transform=None, target_transform=None, use_npz=False, crop=True, create=True):
        super().__init__()
        background = NOmniglot(root=root, train=True, frames_num=frames_num, data_type=data_type,
                               transform=transform, target_transform=target_transform,
                               use_npz=use_npz, crop=crop, create=create)
        # Shift evaluation labels past the 964 background classes.
        evaluation = NOmniglot(root=root, train=False, frames_num=frames_num, data_type=data_type,
                               transform=transform, target_transform=lambda x: x + 964,
                               use_npz=use_npz, crop=crop, create=create)
        self.data = torch.utils.data.ConcatDataset([background, evaluation])
        # First 15 of every 20 samples form the training split, the rest the test split.
        wanted = range(15) if train else range(15, 20)
        self.id = [index for index in range(len(self.data)) if index % 20 in wanted]

    def __len__(self):
        return len(self.id)

    def __getitem__(self, index):
        sample, target = self.data[self.id[index]]
        return sample, target
if __name__ == '__main__':
    # Smoke test. NOmniglotfull yields (image, label) pairs, so each batch
    # unpacks into TWO tensors — the original four-way unpacking
    # (x_spt, y_spt, x_qry, y_qry) was copied from the N-way-K-shot loader
    # and raised a ValueError at runtime.
    db_train = NOmniglotfull('../../data/', train=True, frames_num=4, data_type='event')
    dataloadertrain = DataLoader(db_train, batch_size=16, shuffle=True, num_workers=16, pin_memory=True)
    for image, label in dataloadertrain:
        print(image.shape)
================================================
FILE: braincog/datasets/NOmniglot/nomniglot_nw_ks.py
================================================
import torch
import torchvision
import numpy as np
from torch.utils.data import Dataset, DataLoader
from braincog.datasets.NOmniglot.NOmniglot import NOmniglot
class NOmniglotNWayKShot(Dataset):
    '''
    Episode sampler providing n-way k-shot data for meta learning.
    Each item is one episode: a shuffled support set (n_way * k_shot samples)
    and a shuffled query set (n_way * k_query samples).
    We set the sampling times of each epoch as "len(self.dataSet) // (self.n_way * (self.k_shot + self.k_query))"
    you can increase or decrease the number of epochs to determine the total training times
    '''

    def __init__(self, root, n_way, k_shot, k_query, train=True, frames_num=12, data_type='event',
                 transform=torchvision.transforms.Resize((28, 28))):
        self.dataSet = NOmniglot(root=root, train=train,
                                 frames_num=frames_num, data_type=data_type, transform=transform)
        self.n_way = n_way  # n way
        self.k_shot = k_shot  # k shot
        self.k_query = k_query  # k query
        # Every N-Omniglot class has 20 samples, so support + query <= 20.
        assert (k_shot + k_query) <= 20
        self.length = 256  # number of pre-sampled episodes held in the cache
        self.data_cache = self.load_data_cache(self.dataSet.datadict, self.length)

    def load_data_cache(self, data_dict, length):
        '''
        The dataset is sampled randomly length times, and the address is saved to obtain
        (only file paths are cached here; frames are loaded lazily in __getitem__).
        '''
        data_cache = []
        for i in range(length):
            # Pick n_way distinct classes for this episode.
            selected_cls = np.random.choice(len(data_dict), self.n_way, False)
            x_spts, y_spts, x_qrys, y_qrys = [], [], [], []
            for j, cur_class in enumerate(selected_cls):
                # Pick k_shot + k_query distinct samples of the class; the
                # first k_shot form the support set, the rest the query set.
                selected_img = np.random.choice(20, self.k_shot + self.k_query, False)
                x_spts.append(np.array(data_dict[cur_class])[selected_img[:self.k_shot]])
                x_qrys.append(np.array(data_dict[cur_class])[selected_img[self.k_shot:]])
                # Labels are episode-local indices (0..n_way-1), not global class ids.
                y_spts.append([j for _ in range(self.k_shot)])
                y_qrys.append([j for _ in range(self.k_query)])
            # Shuffle support and query sets, keeping images/labels aligned
            # by applying the same permutation to both.
            shufflespt = np.random.choice(self.n_way * self.k_shot, self.n_way * self.k_shot, False)
            shuffleqry = np.random.choice(self.n_way * self.k_query, self.n_way * self.k_query, False)
            temp = [np.array(x_spts).reshape(-1)[shufflespt], np.array(y_spts).reshape(-1)[shufflespt],
                    np.array(x_qrys).reshape(-1)[shuffleqry], np.array(y_qrys).reshape(-1)[shuffleqry]]
            data_cache.append(temp)
        return data_cache

    def __getitem__(self, index):
        '''
        Load the frames for one cached episode.
        :return: (support images, support labels, query images, query labels)
        '''
        x_spts, y_spts, x_qrys, y_qrys = self.data_cache[index]
        x_sptst, y_sptst, x_qryst, y_qryst = [], [], [], []
        for i, j in zip(x_spts, y_spts):
            i, j = self.dataSet.readimage(i, j)
            x_sptst.append(i.unsqueeze(0))
            y_sptst.append(j)
        for i, j in zip(x_qrys, y_qrys):
            i, j = self.dataSet.readimage(i, j)
            x_qryst.append(i.unsqueeze(0))
            y_qryst.append(j)
        return torch.cat(x_sptst, dim=0), np.array(y_sptst), torch.cat(x_qryst, dim=0), np.array(y_qryst)

    def reset(self):
        '''Re-sample the episode cache; call between epochs.'''
        self.data_cache = self.load_data_cache(self.dataSet.datadict, self.length)

    def __len__(self):
        return len(self.data_cache)
if __name__ == "__main__":
db_train = NOmniglotNWayKShot('./data/', n_way=5, k_shot=1, k_query=15,
frames_num=4, data_type='frequency', train=True)
dataloadertrain = DataLoader(db_train, batch_size=16, shuffle=True, num_workers=16, pin_memory=True)
for x_spt, y_spt, x_qry, y_qry in dataloadertrain:
print(x_spt.shape)
db_train.resampling()
================================================
FILE: braincog/datasets/NOmniglot/nomniglot_pair.py
================================================
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
from numpy.random import choice as npc
import random
import torch.nn.functional as F
from braincog.datasets.NOmniglot import NOmniglot
class NOmniglotTrainSet(Dataset):
    '''
    Pair sampler for training a Siamese network on N-Omniglot.

    Odd indices yield two samples of the same character (label 1.0),
    even indices yield samples of two different characters (label 0.0).
    '''

    def __init__(self, root='data/', use_frame=True, frames_num=10, data_type='event', use_npz=False, resize=None):
        super(NOmniglotTrainSet, self).__init__()
        self.resize = resize
        self.data_type = data_type
        self.use_frame = use_frame
        self.dataSet = NOmniglot(root=root, train=True, frames_num=frames_num, data_type=data_type, use_npz=use_npz)
        self.datas, self.num_classes = self.dataSet.datadict, self.dataSet.num_classes
        np.random.seed(0)

    def __len__(self):
        '''
        Sampling upper limit, you can set the maximum sampling times when using to terminate
        '''
        return 21000000

    def __getitem__(self, index):
        if index % 2 == 1:
            # Positive pair: two (possibly identical) samples of one class.
            label = 1.0
            cls = random.randint(0, self.num_classes - 1)
            first = random.choice(self.datas[cls])
            second = random.choice(self.datas[cls])
        else:
            # Negative pair: one sample from each of two distinct classes.
            label = 0.0
            cls_a = random.randint(0, self.num_classes - 1)
            cls_b = random.randint(0, self.num_classes - 1)
            while cls_a == cls_b:
                cls_b = random.randint(0, self.num_classes - 1)
            first = random.choice(self.datas[cls_a])
            second = random.choice(self.datas[cls_b])
        if self.use_frame:
            # Both supported data types are stored as compressed .npz frames.
            if self.data_type in ('event', 'frequency'):
                first = torch.tensor(np.load(first)['arr_0']).float()
                second = torch.tensor(np.load(second)['arr_0']).float()
            else:
                raise NotImplementedError
            if self.resize is not None:
                # Crop the character region, then resample to the target size.
                first = F.interpolate(first[:, :, 4:254, 54:304], size=(self.resize, self.resize))
                second = F.interpolate(second[:, :, 4:254, 54:304], size=(self.resize, self.resize))
        return first, second, torch.from_numpy(np.array([label], dtype=np.float32))
class NOmniglotTestSet(Dataset):
'''
Dataloader for Siamese Net
'''
def __init__(self, root='data/', time=1000, way=20, shot=1, query=1, use_frame=True, frames_num=10, data_type='event', use_npz=True, resize=None):
super(NOmniglotTestSet, self).__init__()
self.resize = resize
self.use_frame = use_frame
self.time = time # Sampling times
self.way = way
self.shot = shot
self.query = query
self.img1 = None # Fix test sample while sampling support set
self.c1 = None # Fixed categories when sampling multiple samples
self.c2 = None
self.select_class = [] # selected classes
self.select_sample = [] # selected samples
self.data_type = data_type
np.random.seed(0)
self.dataSet = NOmniglot(root=root, train=False, frames_num=frames_num, data_type=data_type, use_npz=use_npz)
self.datas, self.num_classes = self.dataSet.datadict, self.dataSet.num_classes
def __len__(self):
'''
In general, the total number of test tasks is 1000.
Since one test sample is collected at a time, way * shot support samples are used for each test
'''
return self.time * self.way * self.shot
def __getitem__(self, index):
'''
The 0th sample of each way*shot is used for query and recorded in the selected sample
to achieve the effect of selecting K +1
'''
idx = index % (self.way * self.shot)
# generate image pair from same class
if idx == 0: #
self.select_class = []
self.c1 = random.randint(0, self.num_classes - 1)
self.c2 = self.c1
sind = random.randint(0, len(self.datas[self.c1]) - 1)
self.select_sample.append(sind)
self.img1 = self.datas[self.c1][sind]
sind = random.randint(0, len(self.datas[self.c2]) - 1)
while sind in self.select_sample:
sind = random.randint(0, len(self.datas[self.c2]) - 1)
img2 = self.datas[self.c1][sind]
self.select_sample.append(sind)
self.select_class.append(self.c1)
# generate image pair from different class
else:
if index % self.shot == 0:
self.c2 = random.randint(0, self.num_classes - 1)
while self.c2 in self.select_class: # self.c1 == c2:
self.c2 = random.randint(0, self.num_classes - 1)
self.select_class.append(self.c2)
self.select_sample = []
sind = random.randint(0, len(self.datas[self.c2]) - 1)
while sind in self.select_sample:
sind = random.randint(0, len(self.datas[self.c2]) - 1)
img2 = self.datas[self.c2][sind]
self.select_sample.append(sind)
if self.use_frame:
if self.data_type == 'event':
img1 = torch.tensor(np.load(self.img1)['arr_0']).float()
img2 = torch.tensor(np.load(img2)['arr_0']).float()
elif self.data_type == 'frequency':
gitextract_qe2qoke6/ ├── .gitignore ├── LICENSE ├── README.md ├── braincog/ │ ├── __init__.py │ ├── base/ │ │ ├── __init__.py │ │ ├── brainarea/ │ │ │ ├── BrainArea.py │ │ │ ├── IPL.py │ │ │ ├── Insula.py │ │ │ ├── PFC.py │ │ │ ├── __init__.py │ │ │ ├── basalganglia.py │ │ │ └── dACC.py │ │ ├── connection/ │ │ │ ├── CustomLinear.py │ │ │ ├── __init__.py │ │ │ └── layer.py │ │ ├── conversion/ │ │ │ ├── __init__.py │ │ │ ├── convertor.py │ │ │ ├── merge.py │ │ │ └── spicalib.py │ │ ├── encoder/ │ │ │ ├── __init__.py │ │ │ ├── encoder.py │ │ │ ├── population_coding.py │ │ │ └── qs_coding.py │ │ ├── learningrule/ │ │ │ ├── BCM.py │ │ │ ├── Hebb.py │ │ │ ├── RSTDP.py │ │ │ ├── STDP.py │ │ │ ├── STP.py │ │ │ └── __init__.py │ │ ├── node/ │ │ │ ├── __init__.py │ │ │ └── node.py │ │ ├── strategy/ │ │ │ ├── LateralInhibition.py │ │ │ ├── __init__.py │ │ │ └── surrogate.py │ │ └── utils/ │ │ ├── __init__.py │ │ ├── criterions.py │ │ └── visualization.py │ ├── datasets/ │ │ ├── CUB2002011.py │ │ ├── ESimagenet/ │ │ │ ├── ES_imagenet.py │ │ │ ├── __init__.py │ │ │ └── reconstructed_ES_imagenet.py │ │ ├── NOmniglot/ │ │ │ ├── NOmniglot.py │ │ │ ├── __init__.py │ │ │ ├── nomniglot_full.py │ │ │ ├── nomniglot_nw_ks.py │ │ │ ├── nomniglot_pair.py │ │ │ └── utils.py │ │ ├── StanfordDogs.py │ │ ├── TinyImageNet.py │ │ ├── __init__.py │ │ ├── bullying10k/ │ │ │ ├── __init__.py │ │ │ └── bullying10k.py │ │ ├── cut_mix.py │ │ ├── datasets.py │ │ ├── gen_input_signal.py │ │ ├── hmdb_dvs/ │ │ │ ├── __init__.py │ │ │ └── hmdb_dvs.py │ │ ├── ncaltech101/ │ │ │ ├── __init__.py │ │ │ └── ncaltech101.py │ │ ├── rand_aug.py │ │ ├── scripts/ │ │ │ ├── testlist01.txt │ │ │ └── ucf101_dvs_preprocessing.py │ │ ├── ucf101_dvs/ │ │ │ ├── __init__.py │ │ │ └── ucf101_dvs.py │ │ └── utils.py │ ├── model_zoo/ │ │ ├── NeuEvo/ │ │ │ ├── __init__.py │ │ │ ├── architect.py │ │ │ ├── genotypes.py │ │ │ ├── model.py │ │ │ ├── model_search.py │ │ │ ├── operations.py │ │ │ └── others.py │ │ ├── __init__.py │ │ 
├── backeinet.py │ │ ├── base_module.py │ │ ├── bdmsnn.py │ │ ├── convnet.py │ │ ├── fc_snn.py │ │ ├── glsnn.py │ │ ├── linearNet.py │ │ ├── nonlinearNet.py │ │ ├── qsnn.py │ │ ├── resnet.py │ │ ├── resnet19_snn.py │ │ ├── rsnn.py │ │ ├── sew_resnet.py │ │ └── vgg_snn.py │ └── utils.py ├── docs/ │ ├── Makefile │ ├── make.bat │ └── source/ │ ├── conf.py │ ├── examples/ │ │ ├── Brain_Cognitive_Function_Simulation/ │ │ │ ├── drosophila.md │ │ │ └── index.rst │ │ ├── Decision_Making/ │ │ │ ├── BDM_SNN.md │ │ │ ├── RL.md │ │ │ └── index.rst │ │ ├── Knowledge_Representation_and_Reasoning/ │ │ │ ├── CKRGSNN.md │ │ │ ├── CRSNN.md │ │ │ ├── SPSNN.md │ │ │ ├── index.rst │ │ │ └── musicMemory.md │ │ ├── Multi-scale_Brain_Structure_Simulation/ │ │ │ ├── Corticothalamic_minicolumn.md │ │ │ ├── HumanBrain.md │ │ │ ├── Human_PFC.md │ │ │ ├── MacaqueBrain.md │ │ │ ├── index.rst │ │ │ └── mouse_brain.md │ │ ├── Perception_and_Learning/ │ │ │ ├── Conversion.md │ │ │ ├── MultisensoryIntegration.md │ │ │ ├── QSNN.md │ │ │ ├── UnsupervisedSTDP.md │ │ │ ├── img_cls/ │ │ │ │ ├── bp.md │ │ │ │ ├── glsnn.md │ │ │ │ └── index.rst │ │ │ └── index.rst │ │ ├── Social_Cognition/ │ │ │ ├── Mirror_Test.md │ │ │ ├── ToM.md │ │ │ └── index.rst │ │ └── index.rst │ ├── index.rst │ ├── modules.rst │ └── setup.rst ├── docs.md ├── documents/ │ ├── Data_engine.md │ ├── Lectures.md │ ├── Pub_brain_inspired_AI.md │ ├── Pub_brain_simulation.md │ ├── Pub_sh_codesign.md │ ├── Publication.md │ └── Tutorial.md ├── examples/ │ ├── Brain_Cognitive_Function_Simulation/ │ │ └── drosophila/ │ │ ├── README.md │ │ └── drosophila.py │ ├── Embodied_Cognition/ │ │ └── RHI/ │ │ ├── RHI_Test.py │ │ ├── RHI_Train.py │ │ └── ReadMe.md │ ├── Hardware_acceleration/ │ │ ├── README.md │ │ ├── firefly_v1_schedule_on_pynq.py │ │ ├── standalone_utils.py │ │ ├── ultra96_test.py │ │ └── zcu104_test.py │ ├── Knowledge_Representation_and_Reasoning/ │ │ ├── CKRGSNN/ │ │ │ ├── README.md │ │ │ ├── main.py │ │ │ └── sub_Conceptnet.csv │ │ 
├── CRSNN/ │ │ │ ├── README.md │ │ │ └── main.py │ │ ├── SPSNN/ │ │ │ ├── README.md │ │ │ └── main.py │ │ └── musicMemory/ │ │ ├── Areas/ │ │ │ ├── apac.py │ │ │ ├── cortex.py │ │ │ ├── pac.py │ │ │ └── pfc.py │ │ ├── Modal/ │ │ │ ├── PAC.py │ │ │ ├── cluster.py │ │ │ ├── composercluster.py │ │ │ ├── composerlayer.py │ │ │ ├── composerlifneuron.py │ │ │ ├── genrecluster.py │ │ │ ├── genrelayer.py │ │ │ ├── genrelifneuron.py │ │ │ ├── izhikevichneuron.py │ │ │ ├── layer.py │ │ │ ├── lifneuron.py │ │ │ ├── note.py │ │ │ ├── notecluster.py │ │ │ ├── notelifneuron.py │ │ │ ├── notesequencelayer.py │ │ │ ├── pitch.py │ │ │ ├── sequencelayer.py │ │ │ ├── sequencememory.py │ │ │ ├── synapse.py │ │ │ ├── tempocluster.py │ │ │ ├── tempolifneuron.py │ │ │ ├── temposequencelayer.py │ │ │ ├── titlecluster.py │ │ │ ├── titlelayer.py │ │ │ └── titlelifneuron.py │ │ ├── README.md │ │ ├── api/ │ │ │ └── music_engine_api.py │ │ ├── conf/ │ │ │ ├── GenreData.txt │ │ │ ├── MIDIData.txt │ │ │ └── conf.py │ │ ├── inputs/ │ │ │ ├── 1.txt │ │ │ ├── Data.txt │ │ │ ├── GenreData.txt │ │ │ ├── MIDIData.txt │ │ │ ├── chords.csv │ │ │ ├── chords.xlsx │ │ │ ├── information.csv │ │ │ ├── keyIndex.csv │ │ │ ├── keys.csv │ │ │ ├── keys.xlsx │ │ │ ├── modeindex.csv │ │ │ ├── modeindex.xlsx │ │ │ ├── pitch2midi.csv │ │ │ └── tones2.csv │ │ ├── result_output/ │ │ │ └── tone learning/ │ │ │ ├── C major_20241121155522.mid │ │ │ ├── C major_20241122093822.mid │ │ │ ├── C major_20241122094000.mid │ │ │ ├── C major_20241122094419.mid │ │ │ └── C major_20241122094736.mid │ │ ├── task/ │ │ │ ├── Bach_generated.mid │ │ │ ├── Classical_generated.mid │ │ │ ├── Sonate C Major.Mid_recall.mid │ │ │ ├── melody_generated.mid │ │ │ ├── mode-conditioned learning.py │ │ │ ├── musicGeneration.py │ │ │ └── musicMemory.py │ │ ├── testData/ │ │ │ ├── Bach/ │ │ │ │ └── prelude C major.mid │ │ │ ├── JayZhou/ │ │ │ │ └── rainbow.mid │ │ │ └── Mozart/ │ │ │ └── Sonate C major.mid │ │ └── tools/ │ │ ├── __init__.py │ │ ├── 
generateData.py │ │ ├── hamonydataset_test.py │ │ ├── msg.py │ │ ├── msgq.py │ │ ├── oscillations.py │ │ ├── position.txt │ │ ├── readjson.py │ │ ├── testSound.py │ │ ├── testmusic21.py │ │ ├── testopengl.py │ │ ├── testwave.py │ │ └── xmlParser.py │ ├── MotorControl/ │ │ └── experimental/ │ │ ├── README.md │ │ ├── brain_area.py │ │ ├── main.py │ │ └── model.py │ ├── Multiscale_Brain_Structure_Simulation/ │ │ ├── CorticothalamicColumn/ │ │ │ ├── README.md │ │ │ ├── data/ │ │ │ │ ├── __init__.py │ │ │ │ └── globaldata.py │ │ │ ├── main.py │ │ │ ├── model/ │ │ │ │ ├── __init__.py │ │ │ │ ├── cortex.py │ │ │ │ ├── cortex_thalamus.py │ │ │ │ ├── dendrite.py │ │ │ │ ├── fire.csv │ │ │ │ ├── layer.py │ │ │ │ ├── synapse.py │ │ │ │ └── thalamus.py │ │ │ └── tools/ │ │ │ ├── __init__.py │ │ │ ├── cortical.csv │ │ │ ├── exdata.py │ │ │ ├── layer.csv │ │ │ ├── neuron.csv │ │ │ └── synapse.csv │ │ ├── Corticothalamic_Brain_Model/ │ │ │ ├── Bioinformatics_propofol_circle.py │ │ │ ├── Readme.md │ │ │ └── spectrogram.py │ │ ├── HumanBrain/ │ │ │ ├── README.md │ │ │ ├── human_brain.py │ │ │ └── human_multi.py │ │ ├── Human_Brain_Model/ │ │ │ ├── NA.py │ │ │ ├── Readme.md │ │ │ ├── gc.py │ │ │ ├── main_246.py │ │ │ ├── main_84.py │ │ │ ├── pci.py │ │ │ ├── pci_246.py │ │ │ └── spectrogram.py │ │ ├── Human_PFC_Model/ │ │ │ ├── README.md │ │ │ └── Six_Layer_PFC.py │ │ ├── MacaqueBrain/ │ │ │ ├── README.md │ │ │ └── macaque_brain.py │ │ └── MouseBrain/ │ │ ├── README.md │ │ └── mouse_brain.py │ ├── Perception_and_Learning/ │ │ ├── Conversion/ │ │ │ ├── burst_conversion/ │ │ │ │ ├── CIFAR10_VGG16.py │ │ │ │ ├── README.md │ │ │ │ └── converted_CIFAR10.py │ │ │ └── msat_conversion/ │ │ │ ├── CIFAR10_VGG16.py │ │ │ ├── README.md │ │ │ ├── converted_CIFAR10.py │ │ │ └── convertor.py │ │ ├── IllusionPerception/ │ │ │ └── AbuttingGratingIllusion/ │ │ │ ├── distortion/ │ │ │ │ ├── __init__.py │ │ │ │ └── abutting_grating_illusion/ │ │ │ │ ├── __init__.py │ │ │ │ └── 
abutting_grating_distortion.py │ │ │ └── main.py │ │ ├── MultisensoryIntegration/ │ │ │ ├── README.md │ │ │ └── code/ │ │ │ ├── MultisensoryIntegrationDEMO_AM.py │ │ │ ├── MultisensoryIntegrationDEMO_IM.py │ │ │ └── measure_and_visualization.py │ │ ├── NeuEvo/ │ │ │ ├── auto_augment.py │ │ │ ├── main.py │ │ │ ├── separate_loss.py │ │ │ ├── train.py │ │ │ ├── train_search.py │ │ │ └── utils.py │ │ ├── QSNN/ │ │ │ ├── README.md │ │ │ └── main.py │ │ ├── UnsupervisedSTDP/ │ │ │ ├── Readme.md │ │ │ └── codef.py │ │ └── img_cls/ │ │ ├── bp/ │ │ │ ├── README.md │ │ │ ├── main.py │ │ │ ├── main_backei.py │ │ │ └── main_simplified.py │ │ ├── glsnn/ │ │ │ ├── README.md │ │ │ └── cls_glsnn.py │ │ ├── spiking_capsnet/ │ │ │ ├── README.md │ │ │ └── spikingcaps.py │ │ └── transfer_for_dvs/ │ │ ├── GradCAM_visualization.py │ │ ├── README.md │ │ ├── datasets.py │ │ ├── main.py │ │ ├── main_transfer.py │ │ └── main_visual_losslandscape.py │ ├── Snn_safety/ │ │ ├── DPSNN/ │ │ │ ├── Readme.txt │ │ │ ├── load_data.py │ │ │ ├── main_dpsnn.py │ │ │ └── model.py │ │ └── RandHet-SNN/ │ │ ├── README.md │ │ ├── evaluate.py │ │ ├── my_node.py │ │ ├── sew_resnet.py │ │ ├── train.py │ │ └── utils.py │ ├── Social_Cognition/ │ │ ├── FOToM/ │ │ │ ├── algorithms/ │ │ │ │ ├── ToM_class.py │ │ │ │ ├── __init__.py │ │ │ │ ├── maddpg.py │ │ │ │ └── tom11.py │ │ │ ├── common/ │ │ │ │ ├── __init__.py │ │ │ │ ├── distributions.py │ │ │ │ ├── tile_images.py │ │ │ │ └── vec_env/ │ │ │ │ ├── __init__.py │ │ │ │ └── vec_env.py │ │ │ ├── evaluate.py │ │ │ ├── main.py │ │ │ ├── multiagent/ │ │ │ │ ├── __init__.py │ │ │ │ ├── core.py │ │ │ │ ├── environment.py │ │ │ │ ├── multi_discrete.py │ │ │ │ ├── policy.py │ │ │ │ ├── rendering.py │ │ │ │ ├── scenario.py │ │ │ │ └── scenarios/ │ │ │ │ ├── __init__.py │ │ │ │ ├── hetero_spread.py │ │ │ │ ├── simple.py │ │ │ │ ├── simple_adversary.py │ │ │ │ ├── simple_crypto.py │ │ │ │ ├── simple_push.py │ │ │ │ ├── simple_reference.py │ │ │ │ ├── 
simple_speaker_listener.py │ │ │ │ ├── simple_spread.py │ │ │ │ ├── simple_tag.py │ │ │ │ └── simple_world_comm.py │ │ │ ├── readme.md │ │ │ └── utils/ │ │ │ ├── __init__.py │ │ │ ├── agents.py │ │ │ ├── buffer.py │ │ │ ├── env_wrappers.py │ │ │ ├── make_env.py │ │ │ ├── misc.py │ │ │ ├── multiprocessing.py │ │ │ ├── networks.py │ │ │ └── noise.py │ │ ├── Intention_Prediction/ │ │ │ └── Intention_Prediction.py │ │ ├── MAToM-SNN/ │ │ │ ├── LICENSE │ │ │ ├── MPE/ │ │ │ │ ├── __init__.py │ │ │ │ ├── agents/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── agents.py │ │ │ │ ├── common/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── distributions.py │ │ │ │ │ ├── tile_images.py │ │ │ │ │ └── vec_env/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── vec_env.py │ │ │ │ ├── main.py │ │ │ │ ├── multiagent/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── scenarios/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── simple.py │ │ │ │ │ ├── simple_crypto.py │ │ │ │ │ ├── simple_push.py │ │ │ │ │ ├── simple_reference.py │ │ │ │ │ ├── simple_speaker_listener.py │ │ │ │ │ ├── simple_spread.py │ │ │ │ │ └── simple_world_comm.py │ │ │ │ ├── policy/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── maddpg.py │ │ │ │ └── utils/ │ │ │ │ ├── __init__.py │ │ │ │ ├── buffer.py │ │ │ │ ├── env_wrappers.py │ │ │ │ ├── make_env.py │ │ │ │ ├── misc.py │ │ │ │ ├── multiprocessing.py │ │ │ │ ├── networks.py │ │ │ │ └── noise.py │ │ │ ├── README.md │ │ │ └── STAG/ │ │ │ ├── agents/ │ │ │ │ ├── __init__.py │ │ │ │ └── sagent.py │ │ │ ├── common_sr/ │ │ │ │ ├── __init__.py │ │ │ │ ├── arguments.py │ │ │ │ ├── dummy_vec_env.py │ │ │ │ ├── multiprocessing_env.py │ │ │ │ ├── replay_buffer.py │ │ │ │ └── srollout.py │ │ │ ├── envs/ │ │ │ │ ├── Stag_Hunt_env.py │ │ │ │ ├── __init__.py │ │ │ │ ├── abstract.py │ │ │ │ └── constants.py │ │ │ ├── main_spiking.py │ │ │ ├── network/ │ │ │ │ ├── __init__.py │ │ │ │ └── spiking_net.py │ │ │ ├── policy/ │ │ │ │ ├── __init__.py │ │ │ │ ├── dqn.py │ │ │ │ ├── stomvdn.py │ │ │ │ └── svdn.py │ │ │ ├── preprocessoing/ 
│ │ │ │ ├── __init__.py │ │ │ │ └── common.py │ │ │ └── runner.py │ │ ├── ReadMe.md │ │ ├── SmashVat/ │ │ │ ├── dqn.py │ │ │ ├── environment.py │ │ │ ├── main.py │ │ │ ├── manual_control.py │ │ │ ├── qnets.py │ │ │ ├── side_effect_eval.py │ │ │ └── window.py │ │ ├── ToCM/ │ │ │ ├── README.md │ │ │ ├── agent/ │ │ │ │ ├── controllers/ │ │ │ │ │ └── ToCMController.py │ │ │ │ ├── learners/ │ │ │ │ │ └── ToCMLearner.py │ │ │ │ ├── memory/ │ │ │ │ │ └── ToCMMemory.py │ │ │ │ ├── models/ │ │ │ │ │ └── ToCMModel.py │ │ │ │ ├── optim/ │ │ │ │ │ ├── loss.py │ │ │ │ │ └── utils.py │ │ │ │ ├── runners/ │ │ │ │ │ └── ToCMRunner.py │ │ │ │ ├── utils/ │ │ │ │ │ └── params.py │ │ │ │ └── workers/ │ │ │ │ └── ToCMWorker.py │ │ │ ├── configs/ │ │ │ │ ├── Config.py │ │ │ │ ├── EnvConfigs.py │ │ │ │ ├── Experiment.py │ │ │ │ ├── ToCM/ │ │ │ │ │ ├── ToCMAgentConfig.py │ │ │ │ │ ├── ToCMControllerConfig.py │ │ │ │ │ ├── ToCMLearnerConfig.py │ │ │ │ │ └── optimal/ │ │ │ │ │ └── starcraft/ │ │ │ │ │ ├── AgentConfig.py │ │ │ │ │ └── LearnerConfig.py │ │ │ │ └── __init__.py │ │ │ ├── env/ │ │ │ │ ├── mpe/ │ │ │ │ │ └── MPE.py │ │ │ │ └── starcraft/ │ │ │ │ └── StarCraft.py │ │ │ ├── environments.py │ │ │ ├── mpe/ │ │ │ │ ├── MPE_Env.py │ │ │ │ ├── __init__.py │ │ │ │ ├── core.py │ │ │ │ ├── environment.py │ │ │ │ ├── multi_discrete.py │ │ │ │ ├── rendering.py │ │ │ │ ├── scenario.py │ │ │ │ └── scenarios/ │ │ │ │ ├── __init__.py │ │ │ │ ├── hetero_spread.py │ │ │ │ ├── simple_adversary.py │ │ │ │ ├── simple_crypto.py │ │ │ │ ├── simple_crypto_display.py │ │ │ │ ├── simple_push.py │ │ │ │ ├── simple_reference.py │ │ │ │ ├── simple_speaker_listener.py │ │ │ │ ├── simple_spread.py │ │ │ │ ├── simple_tag.py │ │ │ │ └── simple_world_comm.py │ │ │ ├── networks/ │ │ │ │ ├── ToCM/ │ │ │ │ │ ├── action.py │ │ │ │ │ ├── critic.py │ │ │ │ │ ├── dense.py │ │ │ │ │ ├── rnns.py │ │ │ │ │ ├── utils.py │ │ │ │ │ └── vae.py │ │ │ │ └── transformer/ │ │ │ │ └── layers.py │ │ │ ├── requirements.txt │ │ │ ├── 
run.sh │ │ │ ├── smac/ │ │ │ │ ├── __init__.py │ │ │ │ ├── bin/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── map_list.py │ │ │ │ ├── env/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── multiagentenv.py │ │ │ │ │ ├── pettingzoo/ │ │ │ │ │ │ ├── StarCraft2PZEnv.py │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── test/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── all_test.py │ │ │ │ │ │ └── smac_pettingzoo_test.py │ │ │ │ │ └── starcraft2/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── maps/ │ │ │ │ │ │ ├── SMAC_Maps/ │ │ │ │ │ │ │ └── 2s_vs_1sc.SC2Map │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── smac_maps.py │ │ │ │ │ ├── render.py │ │ │ │ │ └── starcraft2.py │ │ │ │ └── examples/ │ │ │ │ ├── __init__.py │ │ │ │ ├── pettingzoo/ │ │ │ │ │ ├── README.rst │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── pettingzoo_demo.py │ │ │ │ ├── random_agents.py │ │ │ │ └── rllib/ │ │ │ │ ├── README.rst │ │ │ │ ├── __init__.py │ │ │ │ ├── env.py │ │ │ │ ├── model.py │ │ │ │ ├── run_ppo.py │ │ │ │ └── run_qmix.py │ │ │ ├── train.py │ │ │ └── utils/ │ │ │ ├── __init__.py │ │ │ ├── mlp_buffer.py │ │ │ ├── mlp_nstep_buffer.py │ │ │ ├── popart.py │ │ │ ├── rec_buffer.py │ │ │ ├── segment_tree.py │ │ │ └── util.py │ │ ├── ToM/ │ │ │ ├── BrainArea/ │ │ │ │ ├── PFC_ToM.py │ │ │ │ ├── TPJ.py │ │ │ │ ├── __init__.py │ │ │ │ ├── dACC.py │ │ │ │ ├── one_hot.py │ │ │ │ └── test.py │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── data/ │ │ │ │ ├── NPC_assessment.csv │ │ │ │ ├── agent_assessment.csv │ │ │ │ ├── injury_memory.txt │ │ │ │ ├── injury_value.txt │ │ │ │ └── one_hot.py │ │ │ ├── env/ │ │ │ │ ├── __init__.py │ │ │ │ ├── env.py │ │ │ │ ├── env3_train_env00.py │ │ │ │ └── env3_train_env01.py │ │ │ ├── main_ToM.py │ │ │ ├── main_both.py │ │ │ ├── rulebasedpolicy/ │ │ │ │ ├── Find_a_way.py │ │ │ │ ├── __init__.py │ │ │ │ ├── a_star.py │ │ │ │ ├── load_statedata.py │ │ │ │ ├── point.py │ │ │ │ ├── random_map.py │ │ │ │ ├── statedata_pre.py │ │ │ │ ├── train.txt │ │ │ │ └── world_model.py │ │ │ └── utils/ │ │ │ ├── 
Encoder.py │ │ │ └── one_hot.py │ │ ├── affective_empathy/ │ │ │ ├── BAE-SNN/ │ │ │ │ ├── BAESNN.py │ │ │ │ ├── README.md │ │ │ │ ├── env_poly.py │ │ │ │ └── env_two_poly.py │ │ │ ├── BEEAD-SNN/ │ │ │ │ ├── BEEAD-SNN.py │ │ │ │ ├── README.md │ │ │ │ ├── RL_Brain.py │ │ │ │ ├── env.py │ │ │ │ ├── env_poly_SNN.py │ │ │ │ ├── rsnn.py │ │ │ │ ├── sd_env.py │ │ │ │ └── snowdrift_main.py │ │ │ └── BRP-SNN/ │ │ │ ├── BRP-SNN.py │ │ │ ├── README.md │ │ │ ├── env_poly_SNN.py │ │ │ └── env_two_poly_SNN.py │ │ └── mirror_test/ │ │ ├── README.md │ │ └── mirror_test.py │ ├── Spiking-Transformers/ │ │ ├── LIFNode.py │ │ ├── README.md │ │ ├── datasets.py │ │ ├── main.py │ │ └── models/ │ │ ├── spike_driven_transformer.py │ │ ├── spike_driven_transformer_dvs.py │ │ ├── spike_driven_transformer_v2.py │ │ ├── spike_driven_transformer_v2_dvs.py │ │ ├── spikformer.py │ │ └── spikformer_dvs.py │ ├── Structural_Development/ │ │ ├── DPAP/ │ │ │ ├── README.md │ │ │ ├── mask_model.py │ │ │ ├── prun_main.py │ │ │ └── utils.py │ │ ├── DSD-SNN/ │ │ │ ├── README.md │ │ │ └── cifar100/ │ │ │ ├── available.py │ │ │ ├── main_simplified.py │ │ │ ├── manipulate.py │ │ │ ├── maskcl2.py │ │ │ └── vgg_snn.py │ │ ├── ELSM/ │ │ │ ├── evolve.py │ │ │ ├── lsm.py │ │ │ ├── model.py │ │ │ ├── nsganet.py │ │ │ └── spikes.py │ │ ├── SCA-SNN/ │ │ │ ├── README.md │ │ │ ├── configs/ │ │ │ │ └── train.yaml │ │ │ ├── inclearn/ │ │ │ │ ├── __init__.py │ │ │ │ ├── convnet/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── classifier.py │ │ │ │ │ ├── imbalance.py │ │ │ │ │ ├── maskcl2.py │ │ │ │ │ ├── network.py │ │ │ │ │ ├── resnet.py │ │ │ │ │ ├── sew_resnet.py │ │ │ │ │ └── utils.py │ │ │ │ ├── datasets/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── data.py │ │ │ │ │ └── dataset.py │ │ │ │ ├── models/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base.py │ │ │ │ │ └── incmodel.py │ │ │ │ └── tools/ │ │ │ │ ├── __init__.py │ │ │ │ ├── autoaugment_extra.py │ │ │ │ ├── cutout.py │ │ │ │ ├── data_utils.py │ │ │ │ ├── factory.py │ │ │ │ ├── 
memory.py │ │ │ │ ├── metrics.py │ │ │ │ ├── results_utils.py │ │ │ │ ├── scheduler.py │ │ │ │ ├── similar.py │ │ │ │ └── utils.py │ │ │ └── main.py │ │ └── SD-SNN/ │ │ ├── README.md │ │ ├── main.py │ │ ├── prun_and_generation.py │ │ ├── snn_model.py │ │ └── utils.py │ ├── Structure_Evolution/ │ │ ├── Adaptive_lsm/ │ │ │ ├── BrainCog-Version/ │ │ │ │ ├── README.md │ │ │ │ ├── brid.py │ │ │ │ ├── lsmmodel.py │ │ │ │ ├── maze.py │ │ │ │ └── tools/ │ │ │ │ ├── EnuGlobalNetwork.py │ │ │ │ ├── ExperimentEnvGlobalNetworkSurvival.py │ │ │ │ ├── MazeTurnEnvVec.py │ │ │ │ └── nsganet.py │ │ │ └── raw/ │ │ │ ├── BCM.py │ │ │ ├── README.md │ │ │ ├── lstm.py │ │ │ ├── main.py │ │ │ ├── pltbcm.py │ │ │ ├── pltrank.py │ │ │ ├── q_l.py │ │ │ └── tools/ │ │ │ ├── EnuGlobalNetwork.py │ │ │ ├── ExperimentEnvGlobalNetworkSurvival.py │ │ │ └── MazeTurnEnvVec.py │ │ ├── EB-NAS/ │ │ │ ├── acc_predictor/ │ │ │ │ ├── adaptive_switching.py │ │ │ │ ├── carts.py │ │ │ │ ├── factory.py │ │ │ │ ├── gp.py │ │ │ │ ├── mlp.py │ │ │ │ └── rbf.py │ │ │ ├── cellmodel.py │ │ │ ├── ebnas.py │ │ │ ├── micro_encoding.py │ │ │ ├── motifs.py │ │ │ ├── nsganet.py │ │ │ ├── operations.py │ │ │ ├── readme.md │ │ │ ├── single_genome.py │ │ │ └── tm.py │ │ ├── ELSM/ │ │ │ ├── README.md │ │ │ ├── evolve.py │ │ │ ├── lsm.py │ │ │ ├── model.py │ │ │ ├── nsganet.py │ │ │ └── spikes.py │ │ └── MSE-NAS/ │ │ ├── auto_augment.py │ │ ├── cellmodel.py │ │ ├── evolution.py │ │ ├── loss_f.py │ │ ├── micro_encoding.py │ │ ├── motifs.py │ │ ├── nsganet.py │ │ ├── obj.py │ │ ├── operations.py │ │ ├── readme.md │ │ ├── tm.py │ │ └── utils.py │ ├── TIM/ │ │ ├── README.md │ │ ├── main.py │ │ ├── models/ │ │ │ ├── TIM.py │ │ │ ├── spikformer_braincog_DVS.py │ │ │ └── spikformer_braincog_SHD.py │ │ └── utils/ │ │ ├── MyGrad.py │ │ ├── MyNode.py │ │ └── datasets.py │ └── decision_making/ │ ├── BDM-SNN/ │ │ ├── BDM-SNN-UAV.py │ │ ├── BDM-SNN-hh.py │ │ ├── BDM-SNN.py │ │ ├── README.md │ │ └── decisionmaking.py │ ├── RL/ │ │ ├── 
README.md │ │ ├── atari/ │ │ │ ├── __init__.py │ │ │ └── atari_wrapper.py │ │ ├── mcs-fqf/ │ │ │ ├── discrete.py │ │ │ ├── main.py │ │ │ ├── network.py │ │ │ └── policy.py │ │ ├── requirements.txt │ │ ├── sdqn/ │ │ │ ├── main.py │ │ │ └── network.py │ │ └── utils/ │ │ ├── __init__.py │ │ └── normalization.py │ └── swarm/ │ ├── Collision-Avoidance.py │ └── README.md ├── requirements.txt └── setup.py
Showing preview only (388K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (5174 symbols across 449 files)
FILE: braincog/base/brainarea/BrainArea.py
class BrainArea (line 19) | class BrainArea(nn.Module, abc.ABC):
method __init__ (line 25) | def __init__(self):
method forward (line 31) | def forward(self, x):
method reset (line 39) | def reset(self):
class ThreePointForward (line 48) | class ThreePointForward(BrainArea):
method __init__ (line 53) | def __init__(self, w1, w2, w3):
method forward (line 66) | def forward(self, x):
class Feedback (line 78) | class Feedback(BrainArea):
method __init__ (line 83) | def __init__(self, w1, w2, w3):
method forward (line 96) | def forward(self, x):
method reset (line 106) | def reset(self):
class TwoInOneOut (line 110) | class TwoInOneOut(BrainArea):
method __init__ (line 115) | def __init__(self, w1, w2):
method forward (line 126) | def forward(self, x1, x2):
class SelfConnectionArea (line 136) | class SelfConnectionArea(BrainArea):
method __init__ (line 141) | def __init__(self, w1, w2 ):
method forward (line 153) | def forward(self, x):
method reset (line 163) | def reset(self):
FILE: braincog/base/brainarea/IPL.py
class IPLNet (line 29) | class IPLNet(nn.Module):
method __init__ (line 34) | def __init__(self, connection):
method forward (line 52) | def forward(self, input1, input2): # input from vPMC and STS
method UpdateWeight (line 67) | def UpdateWeight(self, i, dw):
method reset (line 76) | def reset(self):
method getweight (line 86) | def getweight(self):
FILE: braincog/base/brainarea/Insula.py
class InsulaNet (line 27) | class InsulaNet(nn.Module):
method __init__ (line 31) | def __init__(self,connection):
method forward (line 44) | def forward(self, input1, input2): # input from IPLv and STS
method UpdateWeight (line 54) | def UpdateWeight(self,i,dw):
method reset (line 63) | def reset(self):
method getweight (line 73) | def getweight(self):
FILE: braincog/base/brainarea/PFC.py
class PFC (line 7) | class PFC:
method __init__ (line 11) | def __init__(self):
method forward (line 16) | def forward(self, x):
method reset (line 24) | def reset(self):
class dlPFC (line 33) | class dlPFC(BaseModule, PFC):
method __init__ (line 37) | def __init__(self,
method _rest_c (line 52) | def _rest_c(self):
method _create_fc (line 56) | def _create_fc(self):
FILE: braincog/base/brainarea/basalganglia.py
class basalganglia (line 21) | class basalganglia(nn.Module):
method __init__ (line 26) | def __init__(self, ns, na, we, wi, node_type):
method forward (line 104) | def forward(self, input):
method UpdateWeight (line 117) | def UpdateWeight(self, i, dw):
method reset (line 127) | def reset(self):
method getweight (line 137) | def getweight(self):
method getmask (line 144) | def getmask(self):
FILE: braincog/base/brainarea/dACC.py
class dACC (line 17) | class dACC(BaseModule):
method __init__ (line 21) | def __init__(self,
method _rest_c (line 42) | def _rest_c(self):
method _create_fc (line 46) | def _create_fc(self):
method update_c (line 55) | def update_c(self, c, STDP, tau_c=0.2):
method forward (line 70) | def forward(self, inputs, epoch):
FILE: braincog/base/connection/CustomLinear.py
class CustomLinear (line 11) | class CustomLinear(nn.Module):
method __init__ (line 16) | def __init__(self, weight, mask=None):
method forward (line 22) | def forward(self, x: torch.Tensor):
method update (line 31) | def update(self, dw):
FILE: braincog/base/connection/layer.py
class VotingLayer (line 13) | class VotingLayer(nn.Module):
method __init__ (line 19) | def __init__(self, voter_num: int):
method forward (line 23) | def forward(self, x: torch.Tensor):
class WTALayer (line 29) | class WTALayer(nn.Module):
method __init__ (line 34) | def __init__(self, k=1):
method forward (line 38) | def forward(self, x: torch.Tensor):
class NDropout (line 50) | class NDropout(nn.Module):
method __init__ (line 55) | def __init__(self, p):
method n_reset (line 60) | def n_reset(self):
method create_mask (line 67) | def create_mask(self, x):
method forward (line 75) | def forward(self, x):
class WSConv2d (line 85) | class WSConv2d(nn.Conv2d):
method __init__ (line 87) | def __init__(self, in_channels, out_channels, kernel_size, stride=1,
method forward (line 97) | def forward(self, x):
class ThresholdDependentBatchNorm2d (line 108) | class ThresholdDependentBatchNorm2d(_BatchNorm):
method __init__ (line 114) | def __init__(self, num_features, alpha: float, threshold: float = .5, ...
method _check_input_dim (line 126) | def _check_input_dim(self, input):
method forward (line 130) | def forward(self, input):
class TEBN (line 136) | class TEBN(nn.Module):
method __init__ (line 137) | def __init__(self, num_features,step, eps=1e-5, momentum=0.1,**kwargs):
method forward (line 142) | def forward(self, input):
class LayerNorm (line 153) | class LayerNorm(nn.Module):
method __init__ (line 160) | def __init__(self, normalized_shape, eps=1e-6, data_format="channels_l...
method forward (line 170) | def forward(self, x):
class SMaxPool (line 181) | class SMaxPool(nn.Module):
method __init__ (line 189) | def __init__(self, child):
method forward (line 194) | def forward(self, x):
method reset (line 201) | def reset(self):
class LIPool (line 205) | class LIPool(nn.Module):
method __init__ (line 213) | def __init__(self, child=None):
method forward (line 221) | def forward(self, x):
method reset (line 227) | def reset(self):
class CustomLinear (line 231) | class CustomLinear(nn.Module):
method __init__ (line 233) | def __init__(self, in_channels, out_channels, bias=True):
method forward (line 253) | def forward(self, inputs):
FILE: braincog/base/conversion/convertor.py
class HookScale (line 9) | class HookScale(nn.Module):
method __init__ (line 16) | def __init__(self,
method forward (line 31) | def forward(self, x):
class Hookoutput (line 45) | class Hookoutput(nn.Module):
method __init__ (line 50) | def __init__(self, module):
method forward (line 55) | def forward(self, x):
class Scale (line 61) | class Scale(nn.Module):
method __init__ (line 66) | def __init__(self, scale: float = 1.0):
method forward (line 70) | def forward(self, x):
function reset (line 77) | def reset(self):
class Convertor (line 90) | class Convertor(nn.Module):
method __init__ (line 111) | def __init__(self,
method forward (line 135) | def forward(self, model):
method register_hook (line 145) | def register_hook(model, p=0.99, channelnorm=False, gamma=0.999):
method get_percentile (line 160) | def get_percentile(model, dataloader, device, batch_num=1):
method replace_for_spike (line 172) | def replace_for_spike(model, lipool=True, soft_mode=True, gamma=1, spi...
class SNode (line 194) | class SNode(nn.Module):
method __init__ (line 201) | def __init__(self, soft_mode=False, gamma=5):
method forward (line 210) | def forward(self, x):
method hard_reset (line 218) | def hard_reset(self):
method soft_reset (line 224) | def soft_reset(self):
method reset (line 230) | def reset(self):
FILE: braincog/base/conversion/merge.py
function mergeConvBN (line 5) | def mergeConvBN(m):
function merge (line 26) | def merge(conv, bn):
FILE: braincog/base/conversion/spicalib.py
class SpiCalib (line 5) | class SpiCalib(nn.Module):
method __init__ (line 6) | def __init__(self, allowance):
method forward (line 12) | def forward(self, x):
method reset (line 33) | def reset(self):
FILE: braincog/base/encoder/encoder.py
class AutoEncoder (line 7) | class AutoEncoder(nn.Module):
method __init__ (line 8) | def __init__(self, step, spike_output=True):
method forward (line 21) | def forward(self, x):
class Encoder (line 45) | class Encoder(nn.Module):
method __init__ (line 56) | def __init__(self, step, encode_type='ttfs', *args, **kwargs):
method forward (line 68) | def forward(self, inputs, deletion_prob=None, shift_var=None):
method direct (line 97) | def direct(self, inputs):
method auto (line 107) | def auto(self, inputs):
method ttfs (line 115) | def ttfs(self, inputs):
method rate (line 131) | def rate(self, inputs):
method phase (line 141) | def phase(self, inputs):
method delete (line 162) | def delete(self, inputs, prob):
method shift (line 175) | def shift(self, inputs, var):
FILE: braincog/base/encoder/population_coding.py
class PEncoder (line 5) | class PEncoder(nn.Module):
method __init__ (line 11) | def __init__(self, step, encode_type):
method forward (line 16) | def forward(self, inputs, num_popneurons, *args, **kwargs):
method population_time (line 21) | def population_time(self, inputs, m):
method population_voltage (line 62) | def population_voltage(self, inputs, m, VTH):
FILE: braincog/base/encoder/qs_coding.py
class QSEncoder (line 8) | class QSEncoder:
method __init__ (line 19) | def __init__(self,
method __call__ (line 37) | def __call__(self, image, image_delta, image_ori, image_ori_delta):
method shift_trans (line 56) | def shift_trans(self, image, image_delta, image_ori, image_ori_delta):
method noise_trans (line 93) | def noise_trans(self, image, image_ori, image_ori_delta):
method reverse_pixels (line 131) | def reverse_pixels(self, image, image_delta, noise_rate, flip_bits=None):
FILE: braincog/base/learningrule/BCM.py
class BCM (line 20) | class BCM(nn.Module):
method __init__ (line 25) | def __init__(self, node, connection, cfunc=None, weightdecay=0.99, tau...
method forward (line 43) | def forward(self, *x):
method cfunc (line 62) | def cfunc(self, s):
method reset (line 67) | def reset(self):
FILE: braincog/base/learningrule/Hebb.py
class Hebb (line 20) | class Hebb(nn.Module):
method __init__ (line 25) | def __init__(self, node, connection):
method forward (line 36) | def forward(self, *x):
method reset (line 54) | def reset(self):
FILE: braincog/base/learningrule/RSTDP.py
class RSTDP (line 20) | class RSTDP(nn.Module):
method __init__ (line 24) | def __init__(self, node, connection, decay=0.99, reward_decay=0.5):
method forward (line 40) | def forward(self, *x, r):
method cal_trace (line 49) | def cal_trace(self, x):
method reset (line 61) | def reset(self):
FILE: braincog/base/learningrule/STDP.py
class STDP (line 20) | class STDP(nn.Module):
method __init__ (line 25) | def __init__(self, node, connection, decay=0.99):
method forward (line 37) | def forward(self, x):
method cal_trace (line 55) | def cal_trace(self, x):
method reset (line 66) | def reset(self):
class MutliInputSTDP (line 73) | class MutliInputSTDP(nn.Module):
method __init__ (line 78) | def __init__(self, node, connection, decay=0.99):
method forward (line 90) | def forward(self, *x):
method cal_trace (line 112) | def cal_trace(self, x):
method reset (line 124) | def reset(self):
class LTP (line 131) | class LTP(MutliInputSTDP):
class LTD (line 138) | class LTD(nn.Module):
method __init__ (line 143) | def __init__(self, node, connection, decay=0.99):
method forward (line 155) | def forward(self, *x):
method cal_trace (line 174) | def cal_trace(self, x):
method reset (line 186) | def reset(self):
class FullSTDP (line 193) | class FullSTDP(nn.Module):
method __init__ (line 198) | def __init__(self, node, connection, decay=0.99, decay2=0.99):
method forward (line 212) | def forward(self, *x):
method cal_tracein (line 239) | def cal_tracein(self, x):
method cal_traceout (line 251) | def cal_traceout(self, x):
method reset (line 263) | def reset(self):
FILE: braincog/base/learningrule/STP.py
class short_time (line 4) | class short_time():
method __init__ (line 11) | def __init__(self, SizeHistOutput):
method syndepr (line 16) | def syndepr(self, Syn=None, ISI=None, Nsp=None):
method set_gsyn (line 31) | def set_gsyn(self, np=None, dt=None, v=None, NoiseSyn=None):
method IDderiv (line 74) | def IDderiv(self, np=None, v=None, dt=None, dv=None, NoiseSyn=None, fl...
method update (line 146) | def update(self, np=None, dt=None, NoiseSyn=None, flag_dv=None):
FILE: braincog/base/node/node.py
class BaseNode (line 25) | class BaseNode(nn.Module, abc.ABC):
method __init__ (line 42) | def __init__(self,
method calc_spike (line 72) | def calc_spike(self):
method integral (line 80) | def integral(self, inputs):
method get_thres (line 90) | def get_thres(self):
method rearrange2node (line 93) | def rearrange2node(self, inputs):
method rearrange2op (line 118) | def rearrange2op(self, inputs):
method forward (line 141) | def forward(self, inputs):
method n_reset (line 199) | def n_reset(self):
method get_n_attr (line 208) | def get_n_attr(self, attr):
method set_n_warm_up (line 215) | def set_n_warm_up(self, flag):
method set_n_threshold (line 223) | def set_n_threshold(self, thresh):
method set_n_tau (line 231) | def set_n_tau(self, tau):
class BaseMCNode (line 244) | class BaseMCNode(nn.Module, abc.ABC):
method __init__ (line 251) | def __init__(self,
method calc_spike (line 267) | def calc_spike(self):
method integral (line 270) | def integral(self, inputs):
method forward (line 273) | def forward(self, inputs: dict):
method n_reset (line 285) | def n_reset(self):
method get_n_fire_rate (line 290) | def get_n_fire_rate(self):
method set_n_warm_up (line 295) | def set_n_warm_up(self, flag):
method set_n_threshold (line 298) | def set_n_threshold(self, thresh):
class ThreeCompNode (line 302) | class ThreeCompNode(BaseMCNode):
method __init__ (line 313) | def __init__(self,
method integral (line 329) | def integral(self, basal_inputs, apical_inputs):
method calc_spike (line 341) | def calc_spike(self):
class ReLUNode (line 351) | class ReLUNode(BaseNode):
method __init__ (line 356) | def __init__(self,
method forward (line 362) | def forward(self, x):
method calc_spike (line 375) | def calc_spike(self):
class BiasReLUNode (line 379) | class BiasReLUNode(BaseNode):
method __init__ (line 384) | def __init__(self,
method forward (line 390) | def forward(self, x):
method calc_spike (line 396) | def calc_spike(self):
class IFNode (line 402) | class IFNode(BaseNode):
method __init__ (line 419) | def __init__(self, threshold=.5, act_fun=AtanGrad, *args, **kwargs):
method integral (line 431) | def integral(self, inputs):
method calc_spike (line 434) | def calc_spike(self):
class LIFNode (line 439) | class LIFNode(BaseNode):
method __init__ (line 457) | def __init__(self, threshold=0.5, tau=2., act_fun=QGateGrad, *args, **...
method integral (line 467) | def integral(self, inputs):
method calc_spike (line 470) | def calc_spike(self):
class BurstLIFNode (line 475) | class BurstLIFNode(LIFNode):
method __init__ (line 476) | def __init__(self, threshold=.5, tau=2., act_fun=RoundGrad, *args, **k...
method calc_spike (line 480) | def calc_spike(self):
class BackEINode (line 486) | class BackEINode(BaseNode):
method __init__ (line 496) | def __init__(self, threshold=0.5, decay=0.2, act_fun=BackEIGateGrad, t...
method integral (line 515) | def integral(self, inputs):
method calc_spike (line 525) | def calc_spike(self):
method n_reset (line 535) | def n_reset(self):
class NoiseLIFNode (line 542) | class NoiseLIFNode(LIFNode):
method __init__ (line 563) | def __init__(self,
method integral (line 583) | def integral(self, inputs): # b, c, w, h / b, c
class BiasLIFNode (line 594) | class BiasLIFNode(BaseNode):
method __init__ (line 614) | def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kw...
method integral (line 621) | def integral(self, inputs):
method calc_spike (line 624) | def calc_spike(self):
class LIFSTDPNode (line 629) | class LIFSTDPNode(BaseNode):
method __init__ (line 634) | def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kw...
method integral (line 641) | def integral(self, inputs):
method calc_spike (line 644) | def calc_spike(self):
method requires_activation (line 649) | def requires_activation(self):
class PLIFNode (line 653) | class PLIFNode(BaseNode):
method __init__ (line 672) | def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kw...
method integral (line 680) | def integral(self, inputs):
method calc_spike (line 683) | def calc_spike(self):
class PSU (line 688) | class PSU(BaseNode):
method __init__ (line 689) | def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kw...
method integral (line 703) | def integral(self, inputs):
method calc_spike (line 707) | def calc_spike(self):
class IPSU (line 711) | class IPSU(BaseNode):
method masked_weight (line 712) | def masked_weight(self):
method __init__ (line 715) | def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kw...
method integral (line 736) | def integral(self, inputs):
method calc_spike (line 740) | def calc_spike(self):
class RPSU (line 744) | class RPSU(BaseNode):
method masked_weight (line 745) | def masked_weight(self):
method __init__ (line 748) | def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kw...
method integral (line 769) | def integral(self, inputs):
method calc_spike (line 774) | def calc_spike(self):
class SPSN (line 778) | class SPSN(BaseNode):
method __init__ (line 779) | def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kw...
method integral (line 790) | def integral(self, inputs):
method calc_spike (line 793) | def calc_spike(self):
class NoisePLIFNode (line 797) | class NoisePLIFNode(PLIFNode):
method __init__ (line 815) | def __init__(self,
method integral (line 835) | def integral(self, inputs): # b, c, w, h / b, c
class BiasPLIFNode (line 845) | class BiasPLIFNode(BaseNode):
method __init__ (line 863) | def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kw...
method integral (line 871) | def integral(self, inputs):
method calc_spike (line 874) | def calc_spike(self):
class DoubleSidePLIFNode (line 879) | class DoubleSidePLIFNode(LIFNode):
method __init__ (line 897) | def __init__(self,
method calc_spike (line 908) | def calc_spike(self):
class IzhNode (line 913) | class IzhNode(BaseNode):
method __init__ (line 926) | def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kw...
method integral (line 949) | def integral(self, inputs):
method calc_spike (line 953) | def calc_spike(self):
method n_reset (line 958) | def n_reset(self):
class IzhNodeMU (line 964) | class IzhNodeMU(BaseNode):
method __init__ (line 977) | def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kw...
method integral (line 991) | def integral(self, inputs):
method calc_spike (line 995) | def calc_spike(self):
method n_reset (line 1000) | def n_reset(self):
method requires_activation (line 1005) | def requires_activation(self):
class DGLIFNode (line 1009) | class DGLIFNode(BaseNode):
method __init__ (line 1016) | def __init__(self, threshold=.5, tau=2., *args, **kwargs):
method integral (line 1021) | def integral(self, inputs):
method calc_spike (line 1025) | def calc_spike(self):
class HTDGLIFNode (line 1035) | class HTDGLIFNode(IFNode):
method __init__ (line 1042) | def __init__(self, threshold=.5, tau=2., *args, **kwargs):
method calc_spike (line 1046) | def calc_spike(self):
method forward (line 1058) | def forward(self, inputs):
class SimHHNode (line 1065) | class SimHHNode(BaseNode):
method __init__ (line 1078) | def __init__(self, threshold=50., tau=2., act_fun=AtanGrad, *args, **k...
method integral (line 1093) | def integral(self, inputs):
method calc_spike (line 1116) | def calc_spike(self):
method forward (line 1120) | def forward(self, inputs):
method n_reset (line 1125) | def n_reset(self):
method requires_activation (line 1130) | def requires_activation(self):
class CTIzhNode (line 1134) | class CTIzhNode(IzhNode):
method __init__ (line 1135) | def __init__(self, threshold=1., tau=2., act_fun=AtanGrad, *args, **kw...
method integral (line 1163) | def integral(self, inputs):
method calc_spike (line 1168) | def calc_spike(self):
method spreadMarkPostNeurons (line 1175) | def spreadMarkPostNeurons(self):
class adth (line 1183) | class adth(BaseNode):
method __init__ (line 1190) | def __init__(self, *args, **kwargs):
method adthNode (line 1193) | def adthNode(self, v, dt, c_m, g_m, alpha_w, ad, Ieff, Ichem, Igap, ta...
method calc_spike (line 1207) | def calc_spike(self):
class HHNode (line 1211) | class HHNode(BaseNode):
method __init__ (line 1218) | def __init__(self, p, dt, device, act_fun=AtanGrad, *args, **kwargs):
method integral (line 1251) | def integral(self, inputs):
method calc_spike (line 1271) | def calc_spike(self):
method forward (line 1274) | def forward(self, inputs):
method requires_activation (line 1279) | def requires_activation(self):
class aEIF (line 1283) | class aEIF(BaseNode):
method __init__ (line 1291) | def __init__(self, p, dt, device, *args, **kwargs):
method integral (line 1321) | def integral(self, inputs):
method calc_spike (line 1331) | def calc_spike(self):
method forward (line 1337) | def forward(self, inputs):
class LIAFNode (line 1345) | class LIAFNode(BaseNode):
method __init__ (line 1353) | def __init__(self, spike_act=BackEIGateGrad(), act_fun="SELU", thresho...
method integral (line 1362) | def integral(self, inputs):
method calc_spike (line 1365) | def calc_spike(self):
class OnlineLIFNode (line 1376) | class OnlineLIFNode(BaseNode):
method __init__ (line 1387) | def __init__(self, threshold=0.5, tau=2., act_fun=QGateGrad, init=Fals...
method integral (line 1397) | def integral(self, inputs):
method calc_spike (line 1403) | def calc_spike(self):
class AdaptiveNode (line 1412) | class AdaptiveNode(LIFNode):
method __init__ (line 1414) | def __init__(self, threshold=1., act_fun=QGateGrad, step=10, spike_out...
method init_weight (line 1459) | def init_weight(self):
method forward (line 1466) | def forward(self, inputs): # (t b) c w h
method n_reset (line 1500) | def n_reset(self):
FILE: braincog/base/strategy/LateralInhibition.py
class LateralInhibition (line 7) | class LateralInhibition(nn.Module):
method __init__ (line 11) | def __init__(self, node, inh, mode="constant"):
method forward (line 17) | def forward(self, x: torch.Tensor, xori=None):
FILE: braincog/base/strategy/surrogate.py
function heaviside (line 8) | def heaviside(x):
class SurrogateFunctionBase (line 12) | class SurrogateFunctionBase(nn.Module):
method __init__ (line 19) | def __init__(self, alpha, requires_grad=True):
method act_fun (line 26) | def act_fun(x, alpha):
method forward (line 34) | def forward(self, x):
class sigmoid (line 47) | class sigmoid(torch.autograd.Function):
method forward (line 62) | def forward(ctx, x, alpha):
method backward (line 69) | def backward(ctx, grad_output):
class SigmoidGrad (line 77) | class SigmoidGrad(SurrogateFunctionBase):
method __init__ (line 78) | def __init__(self, alpha=1., requires_grad=False):
method act_fun (line 82) | def act_fun(x, alpha):
class atan (line 91) | class atan(torch.autograd.Function):
method forward (line 106) | def forward(ctx, inputs, alpha):
method backward (line 111) | def backward(ctx, grad_output):
class AtanGrad (line 126) | class AtanGrad(SurrogateFunctionBase):
method __init__ (line 127) | def __init__(self, alpha=2., requires_grad=True):
method act_fun (line 131) | def act_fun(x, alpha):
class gate (line 140) | class gate(torch.autograd.Function):
method forward (line 155) | def forward(ctx, x, alpha):
method backward (line 162) | def backward(ctx, grad_output):
class GateGrad (line 169) | class GateGrad(SurrogateFunctionBase):
method __init__ (line 170) | def __init__(self, alpha=2., requires_grad=False):
method act_fun (line 174) | def act_fun(x, alpha):
class quadratic_gate (line 183) | class quadratic_gate(torch.autograd.Function):
method forward (line 208) | def forward(ctx, x, alpha):
method backward (line 217) | def backward(ctx, grad_output):
class QGateGrad (line 224) | class QGateGrad(SurrogateFunctionBase):
method __init__ (line 225) | def __init__(self, alpha=2., requires_grad=False):
method act_fun (line 229) | def act_fun(x, alpha):
class relu_like (line 233) | class relu_like(torch.autograd.Function):
method forward (line 235) | def forward(ctx, x, alpha):
method backward (line 241) | def backward(ctx, grad_output):
class RoundGrad (line 250) | class RoundGrad(nn.Module):
method __init__ (line 251) | def __init__(self, **kwargs):
method forward (line 255) | def forward(self, x):
class ReLUGrad (line 259) | class ReLUGrad(SurrogateFunctionBase):
method __init__ (line 264) | def __init__(self, alpha=2., requires_grad=False):
method act_fun (line 268) | def act_fun(x, alpha):
class straight_through_estimator (line 277) | class straight_through_estimator(torch.autograd.Function):
method forward (line 284) | def forward(ctx, inputs):
method backward (line 290) | def backward(ctx, grad_output):
class stdp (line 297) | class stdp(torch.autograd.Function):
method forward (line 299) | def forward(ctx, inputs):
method backward (line 305) | def backward(ctx, grad_output):
class STDPGrad (line 310) | class STDPGrad(SurrogateFunctionBase):
method __init__ (line 311) | def __init__(self, alpha=2., requires_grad=False):
method act_fun (line 315) | def act_fun(x, alpha):
class backeigate (line 322) | class backeigate(torch.autograd.Function):
method forward (line 324) | def forward(ctx, input):
method backward (line 329) | def backward(ctx, grad_output):
class BackEIGateGrad (line 336) | class BackEIGateGrad(SurrogateFunctionBase):
method __init__ (line 337) | def __init__(self, alpha=2., requires_grad=False):
method act_fun (line 341) | def act_fun(x, alpha):
class ei (line 344) | class ei(torch.autograd.Function):
method forward (line 346) | def forward(ctx, input):
method backward (line 351) | def backward(ctx, grad_output):
class EIGrad (line 358) | class EIGrad(SurrogateFunctionBase):
method __init__ (line 359) | def __init__(self, alpha=2., requires_grad=False):
method act_fun (line 363) | def act_fun(x, alpha):
FILE: braincog/base/utils/__init__.py
function drop_path (line 12) | def drop_path(x, drop_prob):
FILE: braincog/base/utils/criterions.py
class UnilateralMse (line 5) | class UnilateralMse(torch.nn.Module):
method __init__ (line 10) | def __init__(self, thresh=1.):
method forward (line 15) | def forward(self, x, target):
class MixLoss (line 23) | class MixLoss(torch.nn.Module):
method __init__ (line 28) | def __init__(self, ce_loss):
method forward (line 33) | def forward(self, x, target):
class TetLoss (line 37) | class TetLoss(torch.nn.Module):
method __init__ (line 38) | def __init__(self, loss_fn):
method forward (line 42) | def forward(self, x, target):
class OnehotMse (line 50) | class OnehotMse(torch.nn.Module):
method __init__ (line 54) | def __init__(self, num_class):
method forward (line 59) | def forward(self, x, target):
FILE: braincog/base/utils/visualization.py
function spike_rate_vis_1d (line 31) | def spike_rate_vis_1d(data, output_dir=''):
function spike_rate_vis (line 46) | def spike_rate_vis(data, output_dir=''):
function plot_mem_distribution (line 61) | def plot_mem_distribution(data,
function plot_tsne (line 113) | def plot_tsne(x, colors,output_dir="", num_classes=None):
function plot_tsne_3d (line 141) | def plot_tsne_3d(x, colors,output_dir="", num_classes=None):
function plot_confusion_matrix (line 178) | def plot_confusion_matrix(logits, labels, output_dir):
FILE: braincog/datasets/CUB2002011.py
class CUB2002011 (line 9) | class CUB2002011(VisionDataset):
method __init__ (line 29) | def __init__(self, root, train=True, transform=None, target_transform=...
method _load_metadata (line 40) | def _load_metadata(self):
method _check_integrity (line 59) | def _check_integrity(self):
method _download (line 72) | def _download(self):
method __len__ (line 84) | def __len__(self):
method __getitem__ (line 87) | def __getitem__(self, idx):
FILE: braincog/datasets/ESimagenet/ES_imagenet.py
class ESImagenet_Dataset (line 13) | class ESImagenet_Dataset(data.Dataset):
method __init__ (line 14) | def __init__(self, mode, data_set_path='/data/dvsimagenet/', transform...
method __getitem__ (line 39) | def __getitem__(self, index):
method __len__ (line 74) | def __len__(self):
FILE: braincog/datasets/ESimagenet/reconstructed_ES_imagenet.py
class ESImagenet2D_Dataset (line 14) | class ESImagenet2D_Dataset(data.Dataset):
method __init__ (line 15) | def __init__(self, mode, data_set_path='/data/ESimagenet-0.18/', trans...
method __getitem__ (line 44) | def __getitem__(self, index):
method __len__ (line 90) | def __len__(self):
FILE: braincog/datasets/NOmniglot/NOmniglot.py
class NOmniglot (line 5) | class NOmniglot(Dataset):
method __init__ (line 6) | def __init__(self, root='data/', frames_num=12, train=True, data_type=...
method __len__ (line 40) | def __len__(self):
method __getitem__ (line 43) | def __getitem__(self, index):
method readimage (line 48) | def readimage(self, image, label):
FILE: braincog/datasets/NOmniglot/nomniglot_full.py
class NOmniglotfull (line 6) | class NOmniglotfull(Dataset):
method __init__ (line 12) | def __init__(self, root='data/', train=True, frames_num=4, data_type='...
method __len__ (line 29) | def __len__(self):
method __getitem__ (line 32) | def __getitem__(self, index):
FILE: braincog/datasets/NOmniglot/nomniglot_nw_ks.py
class NOmniglotNWayKShot (line 8) | class NOmniglotNWayKShot(Dataset):
method __init__ (line 15) | def __init__(self, root, n_way, k_shot, k_query, train=True, frames_nu...
method load_data_cache (line 26) | def load_data_cache(self, data_dict, length):
method __getitem__ (line 51) | def __getitem__(self, index):
method reset (line 65) | def reset(self):
method __len__ (line 68) | def __len__(self):
FILE: braincog/datasets/NOmniglot/nomniglot_pair.py
class NOmniglotTrainSet (line 10) | class NOmniglotTrainSet(Dataset):
method __init__ (line 16) | def __init__(self, root='data/', use_frame=True, frames_num=10, data_t...
method __len__ (line 26) | def __len__(self):
method __getitem__ (line 32) | def __getitem__(self, index):
class NOmniglotTestSet (line 68) | class NOmniglotTestSet(Dataset):
method __init__ (line 74) | def __init__(self, root='data/', time=1000, way=20, shot=1, query=1, u...
method __len__ (line 93) | def __len__(self):
method __getitem__ (line 100) | def __getitem__(self, index):
FILE: braincog/datasets/NOmniglot/utils.py
class FunctionThread (line 9) | class FunctionThread(threading.Thread):
method __init__ (line 10) | def __init__(self, f, *args, **kwargs):
method run (line 16) | def run(self):
function integrate_events_to_frames (line 20) | def integrate_events_to_frames(events, height, width, frames_num=10, dat...
function normalize_frame (line 71) | def normalize_frame(frames: np.ndarray or torch.Tensor, normalization: s...
function convert_events_dir_to_frames_dir (line 91) | def convert_events_dir_to_frames_dir(events_data_dir, frames_data_dir, s...
function convert_aedat4_dir_to_events_dir (line 135) | def convert_aedat4_dir_to_events_dir(root, train):
function num2str (line 178) | def num2str(idx):
function list_all_files (line 184) | def list_all_files(root, suffix, getlen=False):
function list_class_files (line 207) | def list_class_files(root, frames_kind_root, getlen=False, use_npz=False):
FILE: braincog/datasets/StanfordDogs.py
class StanfordDogs (line 9) | class StanfordDogs(VisionDataset):
method __init__ (line 25) | def __init__(self, root, train=True, transform=None, target_transform=...
method __len__ (line 44) | def __len__(self):
method __getitem__ (line 47) | def __getitem__(self, index):
method download (line 58) | def download(self):
method load_split (line 75) | def load_split(self):
method stats (line 87) | def stats(self):
FILE: braincog/datasets/TinyImageNet.py
class TinyImageNet (line 12) | class TinyImageNet(VisionDataset):
method __init__ (line 30) | def __init__(self, root, split='train', transform=None, target_transfo...
method _download (line 52) | def _download(self):
method _check_integrity (line 58) | def _check_integrity(self):
method __getitem__ (line 61) | def __getitem__(self, index):
method __len__ (line 72) | def __len__(self):
function find_classes (line 76) | def find_classes(class_file):
function make_dataset (line 86) | def make_dataset(root, base_folder, dirname, class_to_idx):
FILE: braincog/datasets/__init__.py
function is_dvs_data (line 33) | def is_dvs_data(dataset):
FILE: braincog/datasets/bullying10k/bullying10k.py
class BULLYINGDVS (line 11) | class BULLYINGDVS(Dataset):
method __init__ (line 20) | def __init__(self, save_to, transform=None, target_transform=None):
method __getitem__ (line 40) | def __getitem__(self, index: int) -> Tuple[Any, Any]:
method __len__ (line 67) | def __len__(self):
method _check_exists (line 70) | def _check_exists(self):
FILE: braincog/datasets/cut_mix.py
function event_difference (line 11) | def event_difference(x1, x2, kernel_size=3):
function onehot (line 18) | def onehot(size, target):
function rand_bbox_time (line 24) | def rand_bbox_time(size, rat):
function rand_bbox (line 38) | def rand_bbox(size, rat):
function calc_lam (line 61) | def calc_lam(x1, x2, bbt1, bbt2, bbx1, bbx2, bby1, bby2):
function rand_bbox_st (line 72) | def rand_bbox_st(size, rat):
function spatio_mask (line 80) | def spatio_mask(size, rat):
function temporal_mask (line 94) | def temporal_mask(size, rat):
function st_mask (line 101) | def st_mask(size, rat):
function GMM_mask_clip (line 112) | def GMM_mask_clip(size, rat):
function GMM_mask (line 123) | def GMM_mask(size, rat, n=None):
function calc_masked_lam (line 188) | def calc_masked_lam(x1, x2, mask):
function calc_masked_lam_with_difference (line 200) | def calc_masked_lam_with_difference(x1, x2, mix, kernel_size=3):
class MixUp (line 206) | class MixUp(Dataset):
method __init__ (line 207) | def __init__(self, dataset, num_class, num_mix=1, beta=1., prob=1.0, i...
method __getitem__ (line 217) | def __getitem__(self, index):
method __len__ (line 251) | def __len__(self):
class CutMix (line 255) | class CutMix(Dataset):
method __init__ (line 257) | def __init__(self, dataset, num_class, num_mix=1, beta=1., prob=1.0, i...
method __getitem__ (line 267) | def __getitem__(self, index):
method __len__ (line 333) | def __len__(self):
class EventMix (line 337) | class EventMix(Dataset):
method __init__ (line 339) | def __init__(self,
method __getitem__ (line 361) | def __getitem__(self, index):
method __len__ (line 420) | def __len__(self):
function get_proj (line 430) | def get_proj(self):
FILE: braincog/datasets/datasets.py
function unpack_mix_param (line 48) | def unpack_mix_param(args):
function build_transform (line 61) | def build_transform(is_train, img_size):
function build_dataset (line 105) | def build_dataset(is_train, img_size, dataset, path, same_da=False):
class MNISTData (line 131) | class MNISTData(object):
method __init__ (line 136) | def __init__(self,
method get_data_loaders (line 153) | def get_data_loaders(self):
method get_standard_data (line 167) | def get_standard_data(self):
function get_mnist_data (line 178) | def get_mnist_data(batch_size, num_workers=8, same_da=False,root=DATA_DI...
function get_fashion_data (line 225) | def get_fashion_data(batch_size, num_workers=8, same_da=False,root=DATA_...
function get_cifar10_data (line 258) | def get_cifar10_data(batch_size, num_workers=8, same_da=False,root=DATA_...
function get_cifar100_data (line 283) | def get_cifar100_data(batch_size, num_workers=8, same_data=False,root=DA...
function get_TinyImageNet_data (line 305) | def get_TinyImageNet_data(batch_size, num_workers=8, same_da=False,root=...
function get_imnet_data (line 338) | def get_imnet_data(args, _logger, data_config, num_aug_splits,root=DATA_...
function get_dvsg_data (line 428) | def get_dvsg_data(batch_size, step,root=DATA_DIR, **kwargs):
function get_bullyingdvs_data (line 528) | def get_bullyingdvs_data(batch_size, step, root=DATA_DIR, **kwargs):
function get_dvsc10_data (line 634) | def get_dvsc10_data(batch_size, step, root=DATA_DIR, **kwargs):
function get_NCALTECH101_data (line 755) | def get_NCALTECH101_data(batch_size, step,root=DATA_DIR, **kwargs):
function get_NCARS_data (line 884) | def get_NCARS_data(batch_size, step,root=DATA_DIR, **kwargs):
function get_nomni_data (line 982) | def get_nomni_data(batch_size, train_portion=1.,root=DATA_DIR, **kwargs):
function get_esimnet_data (line 1045) | def get_esimnet_data(batch_size, step,root=DATA_DIR, **kwargs):
function get_nmnist_data (line 1133) | def get_nmnist_data(batch_size, step, **kwargs):
function get_ntidigits_data (line 1233) | def get_ntidigits_data(batch_size, step, **kwargs):
function get_shd_data (line 1277) | def get_shd_data(batch_size, step, **kwargs):
function get_CUB2002011_data (line 1333) | def get_CUB2002011_data(batch_size, num_workers=8, same_da=False,root=DA...
function get_StanfordCars_data (line 1364) | def get_StanfordCars_data(batch_size, num_workers=8, same_da=False,root=...
function get_StanfordDogs_data (line 1395) | def get_StanfordDogs_data(batch_size, num_workers=8, same_da=False,root=...
function get_FGVCAircraft_data (line 1427) | def get_FGVCAircraft_data(batch_size, num_workers=8, same_da=False,root=...
function get_Flowers102_data (line 1459) | def get_Flowers102_data(batch_size, num_workers=8, same_da=False,root=DA...
function get_UCF101DVS_data (line 1491) | def get_UCF101DVS_data(batch_size, step, **kwargs):
function get_HMDBDVS_data (line 1597) | def get_HMDBDVS_data(batch_size, step, **kwargs):
FILE: braincog/datasets/hmdb_dvs/hmdb_dvs.py
class HMDBDVS (line 18) | class HMDBDVS(Dataset):
method __init__ (line 40) | def __init__(self, save_to, transform=None, target_transform=None):
method __getitem__ (line 69) | def __getitem__(self, index: int) -> Tuple[Any, Any]:
method __len__ (line 90) | def __len__(self):
method _check_exists (line 93) | def _check_exists(self):
FILE: braincog/datasets/ncaltech101/ncaltech101.py
class NCALTECH101 (line 17) | class NCALTECH101(Dataset):
method __init__ (line 60) | def __init__(self, save_to, transform=None, target_transform=None):
method __getitem__ (line 187) | def __getitem__(self, index):
method __len__ (line 202) | def __len__(self):
method _check_exists (line 205) | def _check_exists(self):
FILE: braincog/datasets/rand_aug.py
function ShearX (line 9) | def ShearX(x, v): # [-0.3, 0.3]
function ShearY (line 17) | def ShearY(x, v): # [-0.3, 0.3]
function TranslateX (line 25) | def TranslateX(x, v):
function TranslateY (line 35) | def TranslateY(x, v):
function Rotate (line 45) | def Rotate(x, v): # [-30, 30]
function CutoutAbs (line 53) | def CutoutAbs(x, v): # [0, 60] => percentage: [0, 0.2]
function CutoutTemporal (line 70) | def CutoutTemporal(x, v):
function TemporalShift (line 81) | def TemporalShift(x, v):
function SpatioShift (line 115) | def SpatioShift(x, v):
function drop (line 133) | def drop(x, v):
function GaussianBlur (line 147) | def GaussianBlur(x, v):
function SaltAndPepperNoise (line 153) | def SaltAndPepperNoise(x, v):
function Identity (line 161) | def Identity(x, v):
class RandAugment (line 182) | class RandAugment:
method __init__ (line 183) | def __init__(self, n, m):
method __call__ (line 188) | def __call__(self, x):
FILE: braincog/datasets/ucf101_dvs/ucf101_dvs.py
class UCF101DVS (line 27) | class UCF101DVS(Dataset):
method __init__ (line 49) | def __init__(self, save_to, train=False, transform=None, target_transf...
method __getitem__ (line 80) | def __getitem__(self, index: int) -> Tuple[Any, Any]:
method __len__ (line 101) | def __len__(self):
method _check_exists (line 104) | def _check_exists(self):
FILE: braincog/datasets/utils.py
function rescale (line 6) | def rescale(x, factor=None):
function dvs_channel_check_expend (line 20) | def dvs_channel_check_expend(x):
FILE: braincog/model_zoo/NeuEvo/__init__.py
function parse (line 25) | def parse(weights, operation_set,
function parse_genotype (line 142) | def parse_genotype(alphas, steps, multiplier, path=None,
FILE: braincog/model_zoo/NeuEvo/architect.py
function normalize (line 9) | def normalize(x):
function _concat (line 15) | def _concat(xs):
class Architect (line 19) | class Architect(object):
method __init__ (line 20) | def __init__(self, model, args):
method step (line 33) | def step(self, input_valid, target_valid):
method compute_Hw (line 43) | def compute_Hw(self, input_valid, target_valid):
method zero_grads (line 52) | def zero_grads(self, parameters):
method compute_eigenvalues (line 58) | def compute_eigenvalues(self):
method _hessian (line 62) | def _hessian(self, outputs, inputs, out=None, allow_unused=False):
FILE: braincog/model_zoo/NeuEvo/model.py
class MlpCell (line 13) | class MlpCell(BaseModule):
method __init__ (line 14) | def __init__(
method _compile (line 50) | def _compile(self, C, op_names, indices, concat):
method _forward_once (line 81) | def _forward_once(self, s0, s1, drop_prob):
method forward (line 111) | def forward(self, inputs):
class Cell (line 126) | class Cell(nn.Module):
method __init__ (line 127) | def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduct...
method _compile (line 151) | def _compile(self, C, op_names, indices, concat, reduction):
method forward (line 183) | def forward(self, s0, s1, drop_prob):
class DCOCell (line 216) | class DCOCell(nn.Module):
method __init__ (line 217) | def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduct...
method _compile (line 233) | def _compile(self, C, op_names, tos, froms, reduction):
method forward (line 252) | def forward(self, s0, s1, drop_prob):
class AuxiliaryHeadCIFAR (line 280) | class AuxiliaryHeadCIFAR(nn.Module):
method __init__ (line 281) | def __init__(self, C, num_classes, act_fun):
method forward (line 301) | def forward(self, x):
class AuxiliaryHeadImageNet (line 307) | class AuxiliaryHeadImageNet(nn.Module):
method __init__ (line 309) | def __init__(self, C, num_classes):
method forward (line 326) | def forward(self, x):
class NetworkCIFAR (line 333) | class NetworkCIFAR(BaseModule):
method __init__ (line 335) | def __init__(self,
method forward (line 436) | def forward(self, inputs):
class NetworkImageNet (line 474) | class NetworkImageNet(BaseModule):
method __init__ (line 476) | def __init__(self,
method forward (line 549) | def forward(self, inputs):
FILE: braincog/model_zoo/NeuEvo/model_search.py
function calc_weight (line 15) | def calc_weight(x):
function calc_loss (line 27) | def calc_loss(x):
class darts_fun (line 39) | class darts_fun(torch.autograd.Function):
method forward (line 41) | def forward(ctx, inputs, weights): # feature map / arch weight
method backward (line 47) | def backward(ctx, grad_output): # error signal
class MixedOp (line 64) | class MixedOp(nn.Module):
method __init__ (line 65) | def __init__(self, C, stride, act_fun):
method forward (line 76) | def forward(self, x, weights):
class Cell (line 84) | class Cell(nn.Module):
method __init__ (line 86) | def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reductio...
method forward (line 121) | def forward(self, s0, s1, weights):
class Network (line 153) | class Network(BaseModule):
method __init__ (line 155) | def __init__(self, C, num_classes, layers, criterion, steps=4, multipl...
method new (line 228) | def new(self):
method forward (line 235) | def forward(self, inputs):
method reset_fire_rate_record (line 261) | def reset_fire_rate_record(self):
method get_fire_per_step (line 265) | def get_fire_per_step(self):
method _loss (line 268) | def _loss(self, input1, target1, input2):
method _initialize_alphas (line 275) | def _initialize_alphas(self):
method arch_parameters (line 301) | def arch_parameters(self):
method genotype (line 304) | def genotype(self):
method states (line 317) | def states(self):
method restore (line 324) | def restore(self, states):
method update_history (line 328) | def update_history(self):
FILE: braincog/model_zoo/NeuEvo/operations.py
function si_relu (line 15) | def si_relu(x, positive):
class SiReLU (line 26) | class SiReLU(nn.Module):
method __init__ (line 27) | def __init__(self, positive=0):
method forward (line 31) | def forward(self, x):
function weight_init (line 35) | def weight_init(m):
class SiMLP (line 154) | class SiMLP(nn.Module):
method __init__ (line 155) | def __init__(self, c_in, c_out, act_fun=nn.ReLU, positive=0, *args, **...
method forward (line 163) | def forward(self, x):
class ReLUConvBN (line 168) | class ReLUConvBN(nn.Module):
method __init__ (line 173) | def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=T...
method forward (line 186) | def forward(self, x):
class DilConv (line 191) | class DilConv(nn.Module):
method __init__ (line 196) | def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation...
method forward (line 210) | def forward(self, x):
class SepConv (line 215) | class SepConv(nn.Module):
method __init__ (line 217) | def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=T...
method forward (line 236) | def forward(self, x):
class Identity (line 241) | class Identity(nn.Module):
method __init__ (line 243) | def __init__(self, positive=0):
method forward (line 247) | def forward(self, x):
class Zero (line 251) | class Zero(nn.Module):
method __init__ (line 253) | def __init__(self, stride):
method forward (line 257) | def forward(self, x):
class FactorizedReduce (line 263) | class FactorizedReduce(nn.Module):
method __init__ (line 265) | def __init__(self, C_in, C_out, affine=True, act_fun=nn.ReLU, positive...
method forward (line 279) | def forward(self, x):
class DeformConv (line 288) | class DeformConv(nn.Module):
method __init__ (line 289) | def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=T...
method forward (line 302) | def forward(self, x):
class Attention (line 307) | class Attention(Module):
method __init__ (line 312) | def __init__(self, dim, num_heads=4, attention_dropout=0.1, projection...
method forward (line 323) | def forward(self, x):
class TransformerEncoderLayer (line 339) | class TransformerEncoderLayer(Module):
method __init__ (line 345) | def __init__(self, d_model, nhead=4, dim_feedforward=256, dropout=0.1,
method forward (line 363) | def forward(self, src: torch.Tensor, *args, **kwargs) -> torch.Tensor:
function drop_path (line 376) | def drop_path(x, drop_prob: float = 0., training: bool = False):
class DropPath (line 398) | class DropPath(Module):
method __init__ (line 404) | def __init__(self, drop_prob=None):
method forward (line 408) | def forward(self, x):
FILE: braincog/model_zoo/NeuEvo/others.py
class CIFARNet_Wu (line 22) | class CIFARNet_Wu(BaseModule):
method __init__ (line 24) | def __init__(
method forward (line 67) | def forward(self, inputs):
class CIFARNet_Fang (line 79) | class CIFARNet_Fang(BaseModule):
method __init__ (line 81) | def __init__(
method forward (line 119) | def forward(self, inputs):
class DVS_CIFARNet_Fang (line 132) | class DVS_CIFARNet_Fang(BaseModule):
method __init__ (line 134) | def __init__(
method forward (line 172) | def forward(self, inputs):
FILE: braincog/model_zoo/backeinet.py
class MNISTNet (line 8) | class MNISTNet(BaseModule):
method __init__ (line 9) | def __init__(self, step=20, encode_type='rate', if_back=True, if_ei=Tr...
method forward (line 39) | def forward(self, inputs):
class CIFARNet (line 54) | class CIFARNet(BaseModule):
method __init__ (line 55) | def __init__(self, step=20, encode_type='rate', if_back=True, if_ei=Tr...
method forward (line 84) | def forward(self, inputs):
FILE: braincog/model_zoo/base_module.py
class BaseLinearModule (line 8) | class BaseLinearModule(nn.Module):
method __init__ (line 18) | def __init__(self,
method forward (line 43) | def forward(self, x):
class BaseConvModule (line 58) | class BaseConvModule(nn.Module):
method __init__ (line 70) | def __init__(self,
method forward (line 100) | def forward(self, x):
class BaseModule (line 113) | class BaseModule(nn.Module, abc.ABC):
method __init__ (line 123) | def __init__(self,
method reset (line 151) | def reset(self):
method set_attr (line 160) | def set_attr(self, attr, val):
method get_threshold (line 174) | def get_threshold(self):
method get_fp (line 186) | def get_fp(self, temporal_info=False):
method get_mem (line 201) | def get_mem(self, temporal_info=False):
method get_fire_rate (line 216) | def get_fire_rate(self, requires_grad=False):
method get_tot_spike (line 235) | def get_tot_spike(self):
method get_spike_info (line 250) | def get_spike_info(self):
method set_requires_fp (line 276) | def set_requires_fp(self, flag):
method set_requires_mem (line 281) | def set_requires_mem(self, flag):
method get_attr (line 286) | def get_attr(self, attr):
method forward (line 299) | def forward(self, inputs):
class DeformConvPack (line 303) | class DeformConvPack(nn.Module):
method __init__ (line 304) | def __init__(self,
method init_weights (line 342) | def init_weights(self):
method forward (line 347) | def forward(self, x):
FILE: braincog/model_zoo/bdmsnn.py
class BDMSNN (line 17) | class BDMSNN(nn.Module):
method __init__ (line 18) | def __init__(self, num_state, num_action, weight_exc, weight_inh, node...
method forward (line 79) | def forward(self, input):
method UpdateWeight (line 100) | def UpdateWeight(self, i, s, num_action, dw):
method reset (line 124) | def reset(self):
method getweight (line 134) | def getweight(self):
FILE: braincog/model_zoo/convnet.py
class BaseConvNet (line 12) | class BaseConvNet(BaseModule, abc.ABC):
method __init__ (line 13) | def __init__(self,
method _create_feature (line 56) | def _create_feature(self):
method _create_fc (line 60) | def _create_fc(self):
method forward (line 63) | def forward(self, inputs):
class MNISTConvNet (line 102) | class MNISTConvNet(BaseConvNet):
method __init__ (line 103) | def __init__(self,
method _create_feature (line 126) | def _create_feature(self):
method _create_fc (line 153) | def _create_fc(self):
class CifarConvNet (line 165) | class CifarConvNet(BaseConvNet):
method __init__ (line 166) | def __init__(self,
method _create_feature (line 188) | def _create_feature(self):
method _create_fc (line 220) | def _create_fc(self):
function mnist_convnet (line 230) | def mnist_convnet(step,
function cifar_convnet (line 255) | def cifar_convnet(step,
function dvs_convnet (line 283) | def dvs_convnet(step,
FILE: braincog/model_zoo/fc_snn.py
class STSC_Attention (line 12) | class STSC_Attention(nn.Module):
method __init__ (line 13) | def __init__(self, n_channel: int, dimension: int = 2, time_rf: int = ...
method forward (line 34) | def forward(self, x_seq: torch.Tensor):
class STSC_Temporal_Conv (line 58) | class STSC_Temporal_Conv(nn.Module):
method __init__ (line 59) | def __init__(self, channels: int, dimension: int = 2, time_rf: int = 2):
method forward (line 78) | def forward(self, x_seq: torch.Tensor):
class STSC (line 93) | class STSC(nn.Module):
method __init__ (line 94) | def __init__(self, in_channel: int, dimension: int = 2, time_rf_conv: ...
method forward (line 115) | def forward(self, x_seq: torch.Tensor):
class SHD_SNN (line 147) | class SHD_SNN(BaseModule):
method __init__ (line 153) | def __init__(self,
method forward (line 183) | def forward(self, inputs):
FILE: braincog/model_zoo/glsnn.py
class BaseGLSNN (line 12) | class BaseGLSNN(BaseModule):
method __init__ (line 20) | def __init__(self, input_size=784, hidden_sizes=[800] * 3, output_size...
method forward (line 41) | def forward(self, x):
method feedback (line 57) | def feedback(self, ff_value, y_label):
method set_gradient (line 74) | def set_gradient(self, x, y):
method forward_parameters (line 94) | def forward_parameters(self):
method feedback_parameters (line 100) | def feedback_parameters(self):
FILE: braincog/model_zoo/linearNet.py
class droDMTrainNet (line 8) | class droDMTrainNet(nn.Module):
method __init__ (line 13) | def __init__(self, connection):
method forward (line 33) | def forward(self, input):
method UpdateWeight (line 44) | def UpdateWeight(self, i, dw):
method reset (line 54) | def reset(self):
method getweight (line 64) | def getweight(self):
FILE: braincog/model_zoo/nonlinearNet.py
class droDMTestNet (line 8) | class droDMTestNet(nn.Module):
method __init__ (line 13) | def __init__(self, connection):
method forward (line 36) | def forward(self, input, input_da):
method UpdateWeight (line 49) | def UpdateWeight(self, i, dw):
method reset (line 59) | def reset(self):
method getweight (line 69) | def getweight(self):
FILE: braincog/model_zoo/qsnn.py
function sigma (line 34) | def sigma(x):
function deriv_sigma (line 41) | def deriv_sigma(x):
function kappa (line 52) | def kappa(x):
function get_kappas (line 56) | def get_kappas(n):
class Net (line 71) | class Net(nn.Module):
method __init__ (line 75) | def __init__(self, net_size):
method update_state (line 83) | def update_state(self, input_, label, test):
method routine (line 95) | def routine(self,
method update_weight (line 125) | def update_weight(self, lr, t, beta, eps):
method predict (line 134) | def predict(self,
class Hidden_layer (line 156) | class Hidden_layer(nn.Module):
method __init__ (line 160) | def __init__(self, input_size, neu_num, fb_neus):
method update_state (line 179) | def update_state(self, basal_input, apical_input, test):
method update_weight (line 185) | def update_weight(self, delta_, lr, t, beta, eps):
class Output_layer (line 205) | class Output_layer(nn.Module):
method __init__ (line 209) | def __init__(self, input_size, neu_num):
method update_state (line 229) | def update_state(self, basal_input, I, test):
method update_weight (line 239) | def update_weight(self, lr, t, beta, eps):
FILE: braincog/model_zoo/resnet.py
function conv3x3 (line 29) | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
function conv1x1 (line 41) | def conv1x1(in_planes, out_planes, stride=1):
class BasicBlock (line 50) | class BasicBlock(nn.Module):
method __init__ (line 66) | def __init__(self,
method forward (line 97) | def forward(self, x):
class Bottleneck (line 116) | class Bottleneck(nn.Module):
method __init__ (line 132) | def __init__(self,
method forward (line 163) | def forward(self, x):
class ResNet (line 185) | class ResNet(BaseModule):
method __init__ (line 203) | def __init__(self,
method _make_layer (line 352) | def _make_layer(self, block, planes, blocks, stride=1, dilate=False, n...
method forward (line 389) | def forward(self, inputs):
function _resnet (line 446) | def _resnet(arch, block, layers, pretrained=False, **kwargs):
function resnet9 (line 456) | def resnet9(pretrained=False, **kwargs):
function resnet18 (line 461) | def resnet18(pretrained=False, **kwargs):
function resnet34_half (line 467) | def resnet34_half(pretrained=False, **kwargs):
function resnet34 (line 474) | def resnet34(pretrained=False, **kwargs):
function resnet50_half (line 479) | def resnet50_half(pretrained=False, **kwargs):
function resnet50 (line 486) | def resnet50(pretrained=False, **kwargs):
function resnet101 (line 491) | def resnet101(pretrained=False, **kwargs):
function resnet152 (line 497) | def resnet152(pretrained=False, **kwargs):
function resnext50_32x4d (line 503) | def resnext50_32x4d(pretrained=False, **kwargs):
function resnext101_32x8d (line 511) | def resnext101_32x8d(pretrained=False, **kwargs):
function wide_resnet50_2 (line 519) | def wide_resnet50_2(pretrained=False, **kwargs):
function wide_resnet101_2 (line 526) | def wide_resnet101_2(pretrained=False, **kwargs):
FILE: braincog/model_zoo/resnet19_snn.py
function conv3x3 (line 21) | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
function conv1x1 (line 27) | def conv1x1(in_planes, out_planes, stride=1):
class BasicBlock (line 32) | class BasicBlock(nn.Module):
method __init__ (line 35) | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
method forward (line 54) | def forward(self, x):
class ResNet (line 74) | class ResNet(BaseModule):
method __init__ (line 75) | def __init__(self, block, layers, num_classes=10, zero_init_residual=F...
method _make_layer (line 140) | def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
method _forward_impl (line 168) | def _forward_impl(self, x):
method forward (line 188) | def forward(self, inputs):
function _resnet (line 195) | def _resnet(arch, block, layers, pretrained, progress, norm=ThresholdDep...
function resnet19 (line 204) | def resnet19(pretrained=False, progress=True, norm=ThresholdDependentBat...
FILE: braincog/model_zoo/rsnn.py
class RSNN (line 13) | class RSNN(nn.Module):
method __init__ (line 14) | def __init__(self,num_state,num_action):
method forward (line 36) | def forward(self, input):
method UpdateWeight (line 42) | def UpdateWeight(self,reward):
method reset (line 49) | def reset(self):
method getweight (line 54) | def getweight(self):
FILE: braincog/model_zoo/sew_resnet.py
function sew_function (line 31) | def sew_function(x: torch.Tensor, y: torch.Tensor, cnf:str):
function conv3x3 (line 43) | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
function conv1x1 (line 49) | def conv1x1(in_planes, out_planes, stride=1):
class BasicBlock (line 54) | class BasicBlock(nn.Module):
method __init__ (line 57) | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
method forward (line 79) | def forward(self, x):
method extra_repr (line 97) | def extra_repr(self) -> str:
class Bottleneck (line 100) | class Bottleneck(nn.Module):
method __init__ (line 109) | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
method forward (line 131) | def forward(self, x):
method extra_repr (line 153) | def extra_repr(self) -> str:
class SEWResNet (line 157) | class SEWResNet(BaseModule):
method __init__ (line 158) | def __init__(self, block, layers, num_classes=1000, step=8,encode_type...
method _make_layer (line 228) | def _make_layer(self, block, planes, blocks, stride=1, dilate=False, c...
method _forward_impl (line 252) | def _forward_impl(self, inputs):
method _forward_once (line 308) | def _forward_once(self,x):
method forward (line 327) | def forward(self, x):
class SEWResNet19 (line 331) | class SEWResNet19(BaseModule):
method __init__ (line 332) | def __init__(self, block, layers, num_classes=1000, step=8,encode_type...
method _make_layer (line 401) | def _make_layer(self, block, planes, blocks, stride=1, dilate=False, c...
method _forward_impl (line 425) | def _forward_impl(self, inputs):
method _forward_once (line 479) | def _forward_once(self,x):
method forward (line 498) | def forward(self, x):
class SEWResNetCifar (line 502) | class SEWResNetCifar(BaseModule):
method __init__ (line 503) | def __init__(self, block, layers, num_classes=1000, step=8,encode_type...
method _make_layer (line 572) | def _make_layer(self, block, planes, blocks, stride=1, dilate=False, c...
method _forward_impl (line 596) | def _forward_impl(self, inputs):
method _forward_once (line 648) | def _forward_once(self,x):
method forward (line 667) | def forward(self, x):
function _sew_resnet (line 672) | def _sew_resnet(arch, block, layers, pretrained, progress, cnf, **kwargs):
function sew_resnet19 (line 681) | def sew_resnet19(pretrained=False, progress=True, cnf: str = None, **kw...
function sew_resnet18 (line 701) | def sew_resnet18(pretrained=False, progress=True, cnf: str = None, **kw...
function sew_resnet20 (line 721) | def sew_resnet20(pretrained=False, progress=True, cnf: str = None, **kw...
function sew_resnet32 (line 740) | def sew_resnet32(pretrained=False, progress=True, cnf: str = None, **kw...
function sew_resnet44 (line 759) | def sew_resnet44(pretrained=False, progress=True, cnf: str = None, **kw...
function sew_resnet56 (line 778) | def sew_resnet56(pretrained=False, progress=True, cnf: str = None, **kw...
function sew_resnet34 (line 797) | def sew_resnet34(pretrained=False, progress=True, cnf: str = None, **kw...
function sew_resnet50 (line 817) | def sew_resnet50(pretrained=False, progress=True, cnf: str = None, **kw...
function sew_resnet101 (line 837) | def sew_resnet101(pretrained=False, progress=True, cnf: str = None, **k...
function sew_resnet152 (line 857) | def sew_resnet152(pretrained=False, progress=True, cnf: str = None, **k...
function sew_resnext50_32x4d (line 877) | def sew_resnext50_32x4d(pretrained=False, progress=True, cnf: str = None...
function sew_resnext34_32x4d (line 899) | def sew_resnext34_32x4d(pretrained=False, progress=True, cnf: str = None...
function sew_resnext101_32x8d (line 920) | def sew_resnext101_32x8d(pretrained=False, progress=True, cnf: str = Non...
function sew_wide_resnet50_2 (line 941) | def sew_wide_resnet50_2(pretrained=False, progress=True, cnf: str = None...
function sew_wide_resnet101_2 (line 966) | def sew_wide_resnet101_2(pretrained=False, progress=True, cnf: str = Non...
FILE: braincog/model_zoo/vgg_snn.py
class SNN7_tiny (line 22) | class SNN7_tiny(BaseModule):
method __init__ (line 23) | def __init__(self,
method forward (line 58) | def forward(self, inputs):
class SNN5 (line 80) | class SNN5(BaseModule):
method __init__ (line 81) | def __init__(self,
method forward (line 120) | def forward(self, inputs):
class VGG_SNN (line 142) | class VGG_SNN(BaseModule):
method __init__ (line 143) | def __init__(self,
method forward (line 183) | def forward(self, inputs):
FILE: braincog/utils.py
function setup_seed (line 13) | def setup_seed(seed):
function random_gradient (line 35) | def random_gradient(model: nn.Module, sigma: float):
class AverageMeter (line 49) | class AverageMeter(object):
method __init__ (line 50) | def __init__(self):
method reset (line 53) | def reset(self):
method update (line 58) | def update(self, val, n=1):
class TensorGather (line 63) | class TensorGather(object):
method __init__ (line 64) | def __init__(self):
method reset (line 66) | def reset(self):
method update (line 69) | def update(self, val):
function accuracy (line 73) | def accuracy(output, target, topk=(1,)):
function mse (line 90) | def mse(x, y):
function rand_ortho (line 95) | def rand_ortho(shape, irange):
function adjust_surrogate_coeff (line 101) | def adjust_surrogate_coeff(epoch, tot_epochs):
function save_feature_map (line 111) | def save_feature_map(x, dir=''):
function save_spike_info (line 126) | def save_spike_info(fname, epoch, batch_idx, step, avg, var, spike, avg_...
function calc_aurc (line 159) | def calc_aurc(confidences, labels):
FILE: examples/Embodied_Cognition/RHI/RHI_Test.py
class CustomLinear (line 23) | class CustomLinear(nn.Module):
method __init__ (line 24) | def __init__(self, weight,mask=None):
method forward (line 29) | def forward(self, x: torch.Tensor):
method update (line 34) | def update(self, dw):
class M1Net (line 40) | class M1Net(nn.Module):
method __init__ (line 41) | def __init__(self,connection):
method forward (line 47) | def forward(self, input):
method reset (line 66) | def reset(self):
class VNet (line 70) | class VNet(nn.Module):
method __init__ (line 71) | def __init__(self,connection):
method forward (line 77) | def forward(self, input):
method reset (line 96) | def reset(self):
class S1Net (line 100) | class S1Net(nn.Module):
method __init__ (line 101) | def __init__(self,connection):
method forward (line 107) | def forward(self, input, FR, C):
method reset (line 137) | def reset(self):
class EBANet (line 141) | class EBANet(nn.Module):
method __init__ (line 142) | def __init__(self,connection):
method forward (line 148) | def forward(self, input, FR, C):
method reset (line 178) | def reset(self):
class TPJNet (line 182) | class TPJNet(nn.Module):
method __init__ (line 183) | def __init__(self,connection):
method forward (line 189) | def forward(self, input, FR, C):
method reset (line 219) | def reset(self):
method UpdateWeight (line 223) | def UpdateWeight(self, i, W):
class AINet (line 226) | class AINet(nn.Module):
method __init__ (line 227) | def __init__(self,connection):
method forward (line 233) | def forward(self, input, FR, C):
method reset (line 263) | def reset(self):
method UpdateWeight (line 267) | def UpdateWeight(self, i, W):
function DeltaWeight (line 271) | def DeltaWeight(Pre, Pre_n, Post, Post_n):
FILE: examples/Embodied_Cognition/RHI/RHI_Train.py
class CustomLinear (line 23) | class CustomLinear(nn.Module):
method __init__ (line 24) | def __init__(self, weight,mask=None):
method forward (line 29) | def forward(self, x: torch.Tensor):
method update (line 34) | def update(self, dw):
class M1Net (line 40) | class M1Net(nn.Module):
method __init__ (line 41) | def __init__(self,connection):
method forward (line 47) | def forward(self, input):
method reset (line 66) | def reset(self):
class VNet (line 70) | class VNet(nn.Module):
method __init__ (line 71) | def __init__(self,connection):
method forward (line 77) | def forward(self, input):
method reset (line 96) | def reset(self):
class S1Net (line 100) | class S1Net(nn.Module):
method __init__ (line 101) | def __init__(self,connection):
method forward (line 107) | def forward(self, input, FR, C, Fired, W_LatInh):
method reset (line 145) | def reset(self):
class EBANet (line 149) | class EBANet(nn.Module):
method __init__ (line 150) | def __init__(self,connection):
method forward (line 156) | def forward(self, input, FR, C, Fired, W_LatInh):
method reset (line 194) | def reset(self):
class TPJNet (line 198) | class TPJNet(nn.Module):
method __init__ (line 199) | def __init__(self,connection):
method forward (line 205) | def forward(self, input, FR, C):
method reset (line 235) | def reset(self):
method UpdateWeight (line 239) | def UpdateWeight(self, i, W):
class AINet (line 242) | class AINet(nn.Module):
method __init__ (line 243) | def __init__(self,connection):
method forward (line 249) | def forward(self, input, FR, C):
method reset (line 279) | def reset(self):
method UpdateWeight (line 283) | def UpdateWeight(self, i, W, WIn):
function DeltaWeight (line 287) | def DeltaWeight(Pre, Pre_n, Post, Post_n):
FILE: examples/Hardware_acceleration/firefly_v1_schedule_on_pynq.py
class FireFlyV1ConvSchedule (line 9) | class FireFlyV1ConvSchedule:
method __init__ (line 10) | def __init__(
method gen_cmd (line 119) | def gen_cmd(self):
method send_config (line 139) | def send_config(self):
method begin_schedule_non_blocking (line 156) | def begin_schedule_non_blocking(self):
method begin_schedule_blocking (line 161) | def begin_schedule_blocking(self):
method clear_schedule (line 167) | def clear_schedule(self):
method run_all (line 170) | def run_all(self):
method read_status (line 199) | def read_status(self):
function create_schedule (line 212) | def create_schedule(model_config_list: list,
function schedule_run_all (line 320) | def schedule_run_all(schedule_list):
function gen_cmd_array (line 325) | def gen_cmd_array(schedule_list):
function init_firefly_c_lib (line 332) | def init_firefly_c_lib(path, schedule_list):
function init_firefly_c_lib_with_time (line 347) | def init_firefly_c_lib_with_time(path, schedule_list):
function firefly_v1_simulate (line 362) | def firefly_v1_simulate(model_config_list, x):
function evaluate_simulate (line 380) | def evaluate_simulate(model_config_list, sample):
FILE: examples/Hardware_acceleration/standalone_utils.py
function get_im2col_indices (line 7) | def get_im2col_indices(x_shape, field_height, field_width, padding=1, st...
function im2col_indices (line 27) | def im2col_indices(x, field_height, field_width, padding=1, stride=1):
function max_pool_forward_reshape (line 37) | def max_pool_forward_reshape(x, pool_param):
function max_pool_forward_fast (line 49) | def max_pool_forward_fast(x, pool_param):
function max_pool_forward_im2col (line 63) | def max_pool_forward_im2col(x, pool_param):
function conv_forward_fast (line 82) | def conv_forward_fast(x, w, b, pad=1, stride=1):
function spike_map_pack_to_bytes_array (line 101) | def spike_map_pack_to_bytes_array(spike_map, parallel):
function bytes_array_split_to_spike_map (line 107) | def bytes_array_split_to_spike_map(buf_in, time_step, parallel, H, W):
function preprocess (line 113) | def preprocess(model_config_list, x, parallel):
function integrate_and_fire (line 128) | def integrate_and_fire(y, threshold):
function direct_coding (line 139) | def direct_coding(x, w, b, time_step, threshold):
function conv_ifnode_forward (line 145) | def conv_ifnode_forward(x, w, b, threshold):
function conv_ifnode_maxpool_forward (line 151) | def conv_ifnode_maxpool_forward(x, w, b, threshold):
function linear_wta_forward (line 157) | def linear_wta_forward(x, w, b):
function linear_ifnode_forward (line 169) | def linear_ifnode_forward(x, w, b, threshold):
function pad_conv_weight_round_to_parallel (line 180) | def pad_conv_weight_round_to_parallel(parallel, weight, pad_output_chann...
function pad_linear_weight_round_to_parallel (line 191) | def pad_linear_weight_round_to_parallel(parallel, weight):
function pad_linear_weight_round_to_factor (line 198) | def pad_linear_weight_round_to_factor(weight, factor):
function pad_bias_round_to_parallel (line 206) | def pad_bias_round_to_parallel(parallel, bias, pad_value=0):
function np_quantize_per_tensor (line 213) | def np_quantize_per_tensor(x, scale, zero_point):
function np_quantize_prepare (line 221) | def np_quantize_prepare(x, scale, zero_point):
function conv_weight_channel_tiling (line 226) | def conv_weight_channel_tiling(parallel, weight):
function linear_weight_channel_tiling (line 230) | def linear_weight_channel_tiling(parallel, weight):
function conv_to_linear_weight_tiling (line 234) | def conv_to_linear_weight_tiling(parallel, h, w, weight):
function init_input_buffer (line 238) | def init_input_buffer(input_spikes,
function get_from_output_buffer (line 254) | def get_from_output_buffer(output_buffer,
function get_output_index (line 271) | def get_output_index(buffer, parallel):
function save_model_config_list (line 281) | def save_model_config_list(model_config_list, path):
function load_model_config_list (line 286) | def load_model_config_list(path):
FILE: examples/Knowledge_Representation_and_Reasoning/CKRGSNN/main.py
class CKRNet (line 19) | class CKRNet(BrainArea):
method __init__ (line 24) | def __init__(self, w1, w2):
method forward (line 36) | def forward(self, x):
method reset (line 44) | def reset(self):
function S_bound (line 48) | def S_bound(S):
FILE: examples/Knowledge_Representation_and_Reasoning/CRSNN/main.py
class CRNet (line 17) | class CRNet(BrainArea):
method __init__ (line 29) | def __init__(self, w1, w2):
method forward (line 41) | def forward(self, x):
method reset (line 52) | def reset(self):
function S_bound (line 56) | def S_bound(S):
FILE: examples/Knowledge_Representation_and_Reasoning/SPSNN/main.py
class SPNet (line 20) | class SPNet(BrainArea):
method __init__ (line 33) | def __init__(self, w1, w2 ):
method forward (line 45) | def forward(self, x):
method reset (line 57) | def reset(self):
function S_bound (line 62) | def S_bound(S):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Areas/apac.py
class APAC (line 10) | class APAC():
method __init__ (line 16) | def __init__(self):
method encodingNote (line 23) | def encodingNote(self,NoteID):
method encodingMIDINote (line 31) | def encodingMIDINote(self,p):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Areas/cortex.py
class Cortex (line 16) | class Cortex():
method __init__ (line 21) | def __init__(self, neutype, dt):
method addSubGoalToPFC (line 27) | def addSubGoalToPFC(self, goalname):
method addComposerToPFC (line 33) | def addComposerToPFC(self, composername):
method addGenreToPFC (line 39) | def addGenreToPFC(self, genrename):
method musicSequenceMemroyInit (line 45) | def musicSequenceMemroyInit(self):
method rememberANote (line 48) | def rememberANote(self, goalname, noteName, order):
method connectKeyAndNotesUsingKSModel (line 66) | def connectKeyAndNotesUsingKSModel(self,keys, Notes):
method connectKeyAndNotes (line 90) | def connectKeyAndNotes(self,keys, Notes):
method connectKeyAndNotesUsingKSModel (line 107) | def connectKeyAndNotesUsingKSModel(self,keys, Notes):
method rememberANoteWithKnowledge (line 130) | def rememberANoteWithKnowledge(self,goalname, composername, genrename,...
method rememberANoteandTempo (line 161) | def rememberANoteandTempo(self, goalname, composername, genrename, tra...
method actionSequenceMemoryInit (line 223) | def actionSequenceMemoryInit(self):
method recallMusicPFC (line 226) | def recallMusicPFC(self, goalName):
method recallMusicByEpisode (line 234) | def recallMusicByEpisode(self, episodeNotes): # using time window sea...
method generateEx_Nihilo (line 248) | def generateEx_Nihilo(self, firstNote, durations, length):
method generateEx_Nihilo2 (line 310) | def generateEx_Nihilo2(self, firstNote, durations, length):
method generateEx_NihiloAccordingToGenre (line 358) | def generateEx_NihiloAccordingToGenre(self, genreName, firstNote, dura...
method generateEx_NihiloAccordingToComposer (line 407) | def generateEx_NihiloAccordingToComposer(self, composerName, firstNote...
method generateMelodyWithKey (line 468) | def generateMelodyWithKey(self, key, firstNotes, durations, length):
method recallActionIPS (line 528) | def recallActionIPS(self, goalName):
method generate2TrackMusic (line 533) | def generate2TrackMusic(self, firstNotes, durations, lengths):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Areas/pac.py
class PAC (line 16) | class PAC(BrainArea,SequenceMemory):
method __init__ (line 21) | def __init__(self, neutype):
method forward (line 27) | def forward(self, x):
method createActionSequenceMem (line 30) | def createActionSequenceMem(self, layernum, neutype):
method doRemembering_note_only (line 40) | def doRemembering_note_only(self, note, order, dt, t):
method doRemembering (line 50) | def doRemembering(self, trackIndex, noteIndex, order, dt, t, tinterval...
method doConnectToTitle (line 72) | def doConnectToTitle(self, title, track, order):
method doConnectToComposer (line 76) | def doConnectToComposer(self, composer, track, order):
method doConnectToGenre (line 80) | def doConnectToGenre(self, genre, track, order):
method generateEx_Nihilo (line 84) | def generateEx_Nihilo(self, firstNote, durations, order, dt, t):
method generateSimgleTrackNotes (line 109) | def generateSimgleTrackNotes(self, trackIndex, firstNote, durations, o...
class Music_Sequence_Mem (line 136) | class Music_Sequence_Mem(SequenceMemory):
method __init__ (line 141) | def __init__(self, neutype):
method createActionSequenceMem (line 147) | def createActionSequenceMem(self, layernum, neutype):
method doRemembering_note_only (line 157) | def doRemembering_note_only(self, note, order, dt, t):
method doRemembering (line 167) | def doRemembering(self, trackIndex, noteIndex, order, dt, t, tinterval...
method recallByEpisode (line 187) | def recallByEpisode(self, episodeNotes, goals):
method recallByEpisode2 (line 277) | def recallByEpisode2(self, episodeNotes, goals):
method doConnectToTitle (line 432) | def doConnectToTitle(self, title, track, order):
method doConnectToComposer (line 436) | def doConnectToComposer(self, composer, track, order):
method doConnectToGenre (line 440) | def doConnectToGenre(self, genre, track, order):
method doConnectToEmotion (line 444) | def doConnectToEmotion(self, emo, track, order):
method doConnectToKey (line 448) | def doConnectToKey(self, key, track, order, noteIndex):
method doConnectToMode (line 483) | def doConnectToMode(self, mode, keyName, track, order, noteIndex): # ...
method generateEx_Nihilo (line 520) | def generateEx_Nihilo(self, firstNote, durations, order, dt, t):
method generateMelodyWithTone (line 545) | def generateMelodyWithTone(self, firstNote, duration, tone, order, dt,...
method generateSimgleTrackNotes (line 576) | def generateSimgleTrackNotes(self, trackIndex, firstNote, durations, o...
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Areas/pfc.py
class PFC (line 12) | class PFC(PFC):
method __init__ (line 17) | def __init__(self, neutype):
method addNewKey (line 31) | def addNewKey(self):
method addNewSubGoal (line 37) | def addNewSubGoal(self, goalname):
method addNewComposer (line 41) | def addNewComposer(self, composername):
method addNewGenre (line 45) | def addNewGenre(self, genrename):
method addNewMode (line 49) | def addNewMode(self):
method addNewKey (line 71) | def addNewKey(self):
method addNewChord (line 77) | def addNewChord(self):
method setTestStates (line 105) | def setTestStates(self):
method doRecalling (line 114) | def doRecalling(self, goalname, asm):
method doRecalling2 (line 139) | def doRecalling2(self, goalname, asm):
method doRecalling3 (line 180) | def doRecalling3(self,goalname,asm):
method doRemebering (line 236) | def doRemebering(self, goalname, dt, t):
method doRememberingComposer (line 243) | def doRememberingComposer(self, composername, dt, t):
method doRememberingGenre (line 249) | def doRememberingGenre(self, genrename, dt, t):
method doRememberingKey (line 255) | def doRememberingKey(self, key, dt, t):
method doRememberingMode (line 261) | def doRememberingMode(self,mode, dt,t):
method innerLearning (line 267) | def innerLearning(self, goalname, composer, genre):
method inhibitGenres (line 320) | def inhibitGenres(self,dt,t):
method inhibiteGoals (line 327) | def inhibiteGoals(self, dt, t):
method inhibitComposers (line 334) | def inhibitComposers(self, dt, t):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/PAC.py
class PAC (line 10) | class PAC(BrainArea):
method __int__ (line 12) | def __int__(self,w,mask):
method forward (line 19) | def forward(self, x):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/cluster.py
class Cluster (line 6) | class Cluster():
method __init__ (line 11) | def __init__(self, neutype='LIF', neunum=10):
method createClusterNetwork (line 22) | def createClusterNetwork(self):
method setInhibitoryNeurons (line 43) | def setInhibitoryNeurons(self, ratio_inhneuron):
method setPropertiesofNeurons (line 47) | def setPropertiesofNeurons(self, groupID, layerType, layerID):
method setTestStates (line 53) | def setTestStates(self):
method createFullConnections (line 57) | def createFullConnections(self): # all in all connections
method createInhibitoryConnections (line 67) | def createInhibitoryConnections(self): # all in all inhibitory connec...
method writeSelfInfoToJson (line 79) | def writeSelfInfoToJson(self):
method writeSpikeInfoToJson (line 91) | def writeSpikeInfoToJson(self):
class ModeCluster (line 102) | class ModeCluster(Cluster):
method __init__ (line 103) | def __init__(self, neutype, neunum):
method createClusterNetwork (line 106) | def createClusterNetwork(self, areaName):
class KeyCluster (line 122) | class KeyCluster(Cluster):
method __init__ (line 123) | def __init__(self, neutype, neunum):
method createClusterNetwork (line 129) | def createClusterNetwork(self,tone,areaName):
class ChordCluster (line 166) | class ChordCluster(Cluster):
method __init__ (line 167) | def __init__(self, neutype,neunum):
method createClusterNetwork (line 170) | def createClusterNetwork(self):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/composercluster.py
class ComposerCluster (line 5) | class ComposerCluster(Cluster):
method __init__ (line 10) | def __init__(self, neutype, neunum):
method createClusterNetwork (line 16) | def createClusterNetwork(self):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/composerlayer.py
class ComposerLayer (line 5) | class ComposerLayer(Layer):
method __init__ (line 10) | def __init__(self, neutype='LIF'):
method setTestStates (line 14) | def setTestStates(self):
method addNewGroups (line 18) | def addNewGroups(self, groupID, layerID, neunum, composername):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/composerlifneuron.py
class ComposerLIFNeuron (line 4) | class ComposerLIFNeuron(LIFNeuron):
method __init__ (line 9) | def __init__(self, tau_ref=0, vthresh=5, Rm=2, Cm=0.2):
method update (line 15) | def update(self, dt, t):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/genrecluster.py
class GenreCluster (line 4) | class GenreCluster(Cluster):
method __init__ (line 9) | def __init__(self, neutype, neunum):
method createClusterNetwork (line 15) | def createClusterNetwork(self):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/genrelayer.py
class GenreLayer (line 3) | class GenreLayer(Layer):
method __init__ (line 8) | def __init__(self, neutype='LIF'):
method setTestStates (line 12) | def setTestStates(self):
method addNewGroups (line 16) | def addNewGroups(self, groupID, layerID, neunum, genrename):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/genrelifneuron.py
class GenreLIFNeuron (line 3) | class GenreLIFNeuron(LIFNeuron):
method __init__ (line 8) | def __init__(self, tau_ref=0, vthresh=5, Rm=2, Cm=0.2):
method update (line 14) | def update(self, dt, t):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/izhikevichneuron.py
class IzhikevichNeuron (line 12) | class IzhikevichNeuron(IzhNodeMU):
method __init__ (line 18) | def __init__(self, a = 0.1,b = 0.2,c = -65,d = 8,vthresh = 30, dt=0.1):
method update_old (line 50) | def update_old(self,dt,t):
method update (line 65) | def update(self,dt,t,state):
method update_learn (line 70) | def update_learn(self,dt,t):
method update_test (line 82) | def update_test(self,dt,t):
method update_normal (line 91) | def update_normal(self,dt,t):
method updateSynapses (line 102) | def updateSynapses(self,t):
method updateCurrentOfLowerAndUpperLayer (line 106) | def updateCurrentOfLowerAndUpperLayer(self,t):
method setTestStates (line 130) | def setTestStates(self):
method writeBasicInfoToJson (line 140) | def writeBasicInfoToJson(self):
method writeSpikeTimeToJson (line 162) | def writeSpikeTimeToJson(self):
class NoteIzhikevichNeuron (line 172) | class NoteIzhikevichNeuron(IzhikevichNeuron):
method __init__ (line 173) | def __init__(self,a = 0.1,b = 0.2,c = -65,d = 8,vthresh = 30):
method setPreference (line 175) | def setPreference(self):
method computeFilterCurrent (line 177) | def computeFilterCurrent(self):
method updateCurrentOfLowerAndUpperLayer (line 181) | def updateCurrentOfLowerAndUpperLayer(self, t):
class TempoIzhikevichNeuron (line 205) | class TempoIzhikevichNeuron(IzhikevichNeuron):
method __init__ (line 206) | def __init__(self,a = 0.1,b = 0.2,c = -65,d = 8,vthresh = 30):
method setPreference (line 208) | def setPreference(self):
method computeFilterCurrent (line 211) | def computeFilterCurrent(self):
method updateCurrentOfLowerAndUpperLayer (line 215) | def updateCurrentOfLowerAndUpperLayer(self, t):
class TitleIzhikevichNeuron (line 243) | class TitleIzhikevichNeuron(IzhikevichNeuron):
method __init__ (line 244) | def __init__(self,a = 0.1,b = 0.2,c = -65,d = 8,vthresh=30):
class ComposerIzhikevichNeuron (line 247) | class ComposerIzhikevichNeuron(IzhikevichNeuron):
method __init__ (line 248) | def __init__(self,a = 0.1,b = 0.2,c = -65,d = 8,vthresh=30):
class GenreIzhikevichNeuron (line 251) | class GenreIzhikevichNeuron(IzhikevichNeuron):
method __init__ (line 252) | def __init__(self, a = 0.1,b = 0.2,c = -65,d = 8, vthresh=30):
class AmyIzhikevichNeuron (line 255) | class AmyIzhikevichNeuron(IzhikevichNeuron):
method __init__ (line 256) | def __init__(self,a = 0.1,b = 0.2,c = -65,d = 8,vthresh=30):
class DirectionIzhikevichNeuron (line 259) | class DirectionIzhikevichNeuron(IzhikevichNeuron):
method __init__ (line 260) | def __init__(self,a = 0.1,b = 0.2,c = -65,d = 8,vthresh=30):
method setPreference (line 263) | def setPreference(self):
method computeFilterCurrent (line 267) | def computeFilterCurrent(self, input):
method updateCurrentOfLowerAndUpperLayer (line 271) | def updateCurrentOfLowerAndUpperLayer(self, t):
class GridIzhikevichCell (line 296) | class GridIzhikevichCell(IzhikevichNeuron):
method __init__ (line 297) | def __init__(self,a = 0.1,b = 0.2,c = -65,d = 8,vthresh = 30):
class KeyIzhikevichNeuron (line 299) | class KeyIzhikevichNeuron(IzhikevichNeuron):
method __init__ (line 300) | def __init__(self,a = 0.1,b = 0.2,c = -65,d = 8,vthresh = 30):
class ModeIzhikevichNeuron (line 303) | class ModeIzhikevichNeuron(IzhikevichNeuron):
method __init__ (line 304) | def __init__(self,a = 0.1,b = 0.2,c = -65,d = 8,vthresh = 30):
class ChordIzhikevichNeuron (line 307) | class ChordIzhikevichNeuron(IzhikevichNeuron):
method __init__ (line 308) | def __init__(self,a = 0.1,b = 0.2,c = -65,d = 8, vthresh = 30):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/layer.py
class Layer (line 4) | class Layer():
method __init__ (line 10) | def __init__(self, neutype):
method resetProperties (line 18) | def resetProperties(self):
method addNewGroups (line 21) | def addNewGroups(self, layerID, neunum):
class ModeLayer (line 24) | class ModeLayer(Layer):
method __init__ (line 25) | def __init__(self, neutype = 'LIF'):
method setTestStates (line 29) | def setTestStates(self):
method addNewGroups (line 33) | def addNewGroups(self, groupID, layerID, neunum, modeName):
class KeyLayer (line 41) | class KeyLayer(Layer):
method __init__ (line 42) | def __init__(self, neutype='LIF'):
method setTestStates (line 46) | def setTestStates(self):
method addNewGroups (line 50) | def addNewGroups(self, groupID, layerID, neunum, key):
class ChordLayer (line 58) | class ChordLayer(Layer):
method __init__ (line 59) | def __init__(self, neutype = 'LIF'):
method setTestStates (line 62) | def setTestStates(self):
method addNewGroups (line 66) | def addNewGroups(self,groupID, layerID, neunum):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/lifneuron.py
class LIFNeuron (line 5) | class LIFNeuron(node.LIFNode):
method __init__ (line 7) | def __init__(self, tau_ref = 0, vthresh = 5, Rm = 2, Cm = 0.2,dt = 0.1...
method update (line 36) | def update(self, dt, t, state): # state = 'learn' or state = 'test'
method update_learn (line 50) | def update_learn(self, dt, t):
method update_test (line 70) | def update_test(self, dt, t):
method update_normal (line 81) | def update_normal(self, dt, t):
method updateSynapses (line 93) | def updateSynapses(self, t):
method setTestStates (line 97) | def setTestStates(self):
method computeFilterCurrent (line 108) | def computeFilterCurrent(self):
method setPreference (line 111) | def setPreference(self): # set preference of a neuron or called selec...
method writeBasicInfoToJson (line 115) | def writeBasicInfoToJson(self, areaName):
method writeSpikeTimeToJson (line 139) | def writeSpikeTimeToJson(self):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/note.py
class Note (line 7) | class Note():
method __init__ (line 12) | def __init__(self):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/notecluster.py
class NoteCluster (line 5) | class NoteCluster(Cluster):
method __init__ (line 10) | def __init__(self, neutype, neunum):
method createClusterNetwork (line 16) | def createClusterNetwork(self):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/notelifneuron.py
class NoteLIFNeuron (line 4) | class NoteLIFNeuron(LIFNeuron):
method __init__ (line 9) | def __init__(self, tau_ref=0.5, vthresh=5, Rm=2, Cm=0.2):
method setPreference (line 15) | def setPreference(self):
method computeFilterCurrent (line 18) | def computeFilterCurrent(self):
method updateCurrentOfLowerAndUpperLayer (line 22) | def updateCurrentOfLowerAndUpperLayer(self, t):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/notesequencelayer.py
class NoteSequenceLayer (line 6) | class NoteSequenceLayer(SequenceLayer):
method __init__ (line 11) | def __init__(self, neutype):
method addNewGroups (line 17) | def addNewGroups(self, GroupID, layerID, neunum):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/pitch.py
class Pitch (line 7) | class Pitch():
method __init__ (line 13) | def __init__(self):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/sequencelayer.py
class SequenceLayer (line 6) | class SequenceLayer(Layer):
method __init__ (line 11) | def __init__(self, neutype='LIF'):
method addNewGroups (line 19) | def addNewGroups(self, GroupID, layerID, neunum):
method setTestStates (line 39) | def setTestStates(self):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/sequencememory.py
class SequenceMemory (line 6) | class SequenceMemory():
method __init__ (line 11) | def __init__(self, neutype):
method createActionSequenceMem (line 18) | def createActionSequenceMem(self, layernum, neutype, neunumpergroup):
method doRemembering (line 21) | def doRemembering(self):
method doConnecting (line 24) | def doConnecting(self, goal, sl, order):
method setTestStates (line 192) | def setTestStates(self):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/synapse.py
class Synapse (line 3) | class Synapse():
method __init__ (line 8) | def __init__(self, pre, post):
method computeWeight (line 20) | def computeWeight(self, t):
method computeShortTermFacilitation (line 56) | def computeShortTermFacilitation(self, t):
method computeShortTermFacilitation2 (line 79) | def computeShortTermFacilitation2(self, t):
method computeShortTermReduction (line 96) | def computeShortTermReduction(self, t):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/tempocluster.py
class TempoCluster (line 5) | class TempoCluster(Cluster):
method __init__ (line 10) | def __init__(self, neutype, neunum):
method createClusterNetwork (line 16) | def createClusterNetwork(self):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/tempolifneuron.py
class TempoLIFNeuron (line 5) | class TempoLIFNeuron(LIFNeuron):
method __init__ (line 10) | def __init__(self, tau_ref=0.5, vthresh=5, Rm=2, Cm=0.2):
method setPreference (line 16) | def setPreference(self):
method computeFilterCurrent (line 20) | def computeFilterCurrent(self):
method updateCurrentOfLowerAndUpperLayer (line 24) | def updateCurrentOfLowerAndUpperLayer(self, t):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/temposequencelayer.py
class TempoSequenceLayer (line 6) | class TempoSequenceLayer(SequenceLayer):
method __init__ (line 11) | def __init__(self, neutype):
method addNewGroups (line 17) | def addNewGroups(self, GroupID, layerID, neunum):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/titlecluster.py
class TitleCluster (line 5) | class TitleCluster(Cluster):
method __init__ (line 10) | def __init__(self, neutype, neunum):
method createClusterNetwork (line 17) | def createClusterNetwork(self):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/titlelayer.py
class TitleLayer (line 7) | class TitleLayer(Layer):
method __init__ (line 12) | def __init__(self, neutype='LIF'):
method setTestStates (line 19) | def setTestStates(self):
method addNewGroups (line 23) | def addNewGroups(self, groupID, layerID, neunum, goalname):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/titlelifneuron.py
class TitleLIFNeuron (line 5) | class TitleLIFNeuron(LIFNeuron):
method __init__ (line 10) | def __init__(self, tau_ref=0, vthresh=5, Rm=2, Cm=0.2):
method updateCurrentOfLowerAndUpperLayer (line 16) | def updateCurrentOfLowerAndUpperLayer(self, t):
method update (line 30) | def update(self, dt, t):
method computeFiringRate (line 41) | def computeFiringRate(self):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/api/music_engine_api.py
class EngineAPI (line 11) | class EngineAPI():
method __init__ (line 16) | def __init__(self):
method cortexInit (line 23) | def cortexInit(self):
method rememberMusic (line 29) | def rememberMusic(self, muiscName, composerName="None"):
method learnFourPartMusic (line 58) | def learnFourPartMusic(self,xmldata, musicName, composerName="None"):
method rememberPartNotes (line 73) | def rememberPartNotes(self,musicName, composerName, genreName, emo, ke...
method rememberMIDIMusic (line 93) | def rememberMIDIMusic(self, musicName, composerName, noteLength, fileN...
method rememberTrackNotes (line 113) | def rememberTrackNotes(self, musicName, composerName, genreName, track...
method rememberNotes (line 196) | def rememberNotes(self, MusicName, notes, intervals, tempo=True):
method rememberANote (line 213) | def rememberANote(self, MusicName, ComposerName, genreName, TrackIndex...
method memorizing (line 223) | def memorizing(self,MusicName, ComposerName, noteLength, fileName):
method recallMusic (line 234) | def recallMusic(self, musicName):
method generateEx_Nihilo (line 255) | def generateEx_Nihilo(self, firstNote, durations, length,gen_fName):
method generateEx_NihiloAccordingToGenre (line 268) | def generateEx_NihiloAccordingToGenre(self, genreName, firstNote, dura...
method generateEx_NihiloAccordingToComposer (line 282) | def generateEx_NihiloAccordingToComposer(self, composerName, firstNote...
method generate2TrackMusic (line 296) | def generate2TrackMusic(self, firstNotes, durations, lengths):
method generateMelodyWithKey (line 300) | def generateMelodyWithKey(self,tone, firstNotes,durations = None,lengt...
method writeMidiFile (line 306) | def writeMidiFile(self,fileName, mudic):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/conf/conf.py
class Conf (line 4) | class Conf():
method __init__ (line 9) | def __init__(self, neutype="LIF", task="MusicLearning", dt=0.1):
method readNoteFiles (line 29) | def readNoteFiles(self):
method readGenreFils (line 42) | def readGenreFils(self):
method readEmotionFiles (line 56) | def readEmotionFiles(self):
method readKeysFile (line 69) | def readKeysFile(self):
method readKeys2IndexFile (line 86) | def readKeys2IndexFile(self):
method readChordsFile (line 97) | def readChordsFile(self):
method readModesFile (line 112) | def readModesFile(self):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/tools/generateData.py
class joint (line 9) | class joint():
method __init__ (line 10) | def __init__(self):
method self2dic (line 14) | def self2dic(self):
class data (line 19) | class data():
method __init__ (line 21) | def __init__(self):
method self2dic (line 26) | def self2dic(self):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/tools/msg.py
class MyListener (line 9) | class MyListener(object):
method on_error (line 10) | def on_error(self, headers, message):
method on_message (line 12) | def on_message(self, headers, message):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/tools/msgq.py
function createMSQ (line 10) | def createMSQ():
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/tools/readjson.py
function readjsonFile (line 11) | def readjsonFile(filename):
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/tools/testopengl.py
function drawFunc (line 10) | def drawFunc():
FILE: examples/Knowledge_Representation_and_Reasoning/musicMemory/tools/xmlParser.py
function readXmlAsCsv (line 10) | def readXmlAsCsv(xmlPath='xml/'):
FILE: examples/MotorControl/experimental/brain_area.py
class MoColumnPOP (line 7) | class MoColumnPOP(nn.Module):
method __init__ (line 8) | def __init__(self,
method reset (line 29) | def reset(self):
method _emb_decode (line 34) | def _emb_decode(self, x):
method forward (line 42) | def forward(self, inputs):
class MotorCortex (line 48) | class MotorCortex(nn.Module):
method __init__ (line 49) | def __init__(self,
method reset (line 84) | def reset(self):
method _compute_motor_out (line 90) | def _compute_motor_out(self, inputs):
method forward (line 99) | def forward(self, inputs):
class Celebellum (line 110) | class Celebellum(nn.Module):
method __init__ (line 111) | def __init__(self,
method reset (line 136) | def reset(self):
method forward (line 140) | def forward(self, x):
FILE: examples/MotorControl/experimental/main.py
function creat_key_finger_emb (line 72) | def creat_key_finger_emb():
function mse_loss (line 81) | def mse_loss(pred, target):
function main (line 85) | def main():
FILE: examples/MotorControl/experimental/model.py
class Motion (line 7) | class Motion(nn.Module):
method __init__ (line 8) | def __init__(self, in_dims: int, out_dims: int=17, time_window: int=8,...
method forward (line 16) | def forward(self, x):
method learn (line 22) | def learn(self):
FILE: examples/Multiscale_Brain_Structure_Simulation/CorticothalamicColumn/model/cortex.py
class Cortex (line 11) | class Cortex():
method __init__ (line 17) | def __init__(self,neuronnumscale):
method setNeuronToIndex (line 31) | def setNeuronToIndex(self,node):
method setLayers (line 35) | def setLayers(self):
FILE: examples/Multiscale_Brain_Structure_Simulation/CorticothalamicColumn/model/cortex_thalamus.py
class Cortex_Thalamus (line 17) | class Cortex_Thalamus():
method __init__ (line 31) | def __init__(self, neuronnumscale):
method setSynapseNum (line 44) | def setSynapseNum(self):
method setLayer (line 51) | def setLayer(self):
method setNeuronsDendritesAndSynapes (line 62) | def setNeuronsDendritesAndSynapes(self):
method setSynapsesToDendrites (line 192) | def setSynapsesToDendrites(self):
method setCortexProperties (line 227) | def setCortexProperties(self):
method setThalamusProperties (line 229) | def setThalamusProperties(self):
method CreateCortexNetwork (line 232) | def CreateCortexNetwork(self):
method getTotalNeuronNumber (line 240) | def getTotalNeuronNumber(self):
method getTotalSynapseNumber (line 242) | def getTotalSynapseNumber(self):
method getCortexNeuronNumber (line 244) | def getCortexNeuronNumber(self):
method getThalamoNeuronNumber (line 246) | def getThalamoNeuronNumber(self):
method getSpecifiedNeuronNumber (line 248) | def getSpecifiedNeuronNumber(self,name):
method getNeuronTypesNumber (line 259) | def getNeuronTypesNumber(self):
method getNeuronTypes (line 261) | def getNeuronTypes(self):
method getCorticalSynapseNumber (line 263) | def getCorticalSynapseNumber(self):
method getThalamoSynapseNumber (line 265) | def getThalamoSynapseNumber(self):
method getPreAndPostNeuronsOfSynapse (line 267) | def getPreAndPostNeuronsOfSynapse(self,index):
method getCortexLayerNeuronNumber (line 272) | def getCortexLayerNeuronNumber(self,layername):
method getCortexLayerSynapseNumber (line 275) | def getCortexLayerSynapseNumber(self,layername):
method getCortexLayerNeuronTypes (line 278) | def getCortexLayerNeuronTypes(self,layername):
method getCortexLayerPreAndPostNeuronsOfSynapse (line 284) | def getCortexLayerPreAndPostNeuronsOfSynapse(self,layername,index):
method getNeuronAllPreNeuronsTypes (line 288) | def getNeuronAllPreNeuronsTypes(self,index):
method outputNeuronInfo (line 292) | def outputNeuronInfo(self):
method outputConnectionMatrix (line 301) | def outputConnectionMatrix(self):
method outputsynapspercent (line 321) | def outputsynapspercent(self,namelist):
method run (line 334) | def run(self):
method outputSpikeThreashold (line 344) | def outputSpikeThreashold(self):
FILE: examples/Multiscale_Brain_Structure_Simulation/CorticothalamicColumn/model/dendrite.py
class Dendrite (line 11) | class Dendrite():
method __init__ (line 16) | def __init__(self):
method setSynapse (line 24) | def setSynapse(self,syn):
method getSynapseInfo (line 35) | def getSynapseInfo(self,f,nodename,denpos):
FILE: examples/Multiscale_Brain_Structure_Simulation/CorticothalamicColumn/model/layer.py
class Layer (line 10) | class Layer():
method __init__ (line 21) | def __init__(self):
method getLayerNeuronNumber (line 29) | def getLayerNeuronNumber(self):
method getLayerSynapseNumber (line 31) | def getLayerSynapseNumber(self):
method getLayerNeuronTypes (line 33) | def getLayerNeuronTypes(self):
method stimulateNeuronInLayer4_BFS (line 36) | def stimulateNeuronInLayer4_BFS(self, T, neulist):
FILE: examples/Multiscale_Brain_Structure_Simulation/CorticothalamicColumn/model/synapse.py
class Synapse (line 7) | class Synapse():
method __init__ (line 17) | def __init__(self, pre,post,locationlayer):
method getInfo (line 31) | def getInfo(self,f,nodename,denpos,denlayer):
FILE: examples/Multiscale_Brain_Structure_Simulation/CorticothalamicColumn/model/thalamus.py
class Thalamus (line 7) | class Thalamus():
method __init__ (line 13) | def __init__(self):
method setNeuronToIndex (line 23) | def setNeuronToIndex(self,node):
method setThalamusProperties (line 27) | def setThalamusProperties(self):
FILE: examples/Multiscale_Brain_Structure_Simulation/CorticothalamicColumn/tools/exdata.py
class EXDATA (line 4) | class EXDATA():
method __int__ (line 5) | def __int__(self):
method getCortexData (line 8) | def getCortexData(self):
method getCortexData2 (line 28) | def getCortexData2(self):
method getLayerData (line 34) | def getLayerData(self):
method getNeuronData (line 54) | def getNeuronData(self):
method getSynapseData (line 72) | def getSynapseData(self, postneuron):
FILE: examples/Multiscale_Brain_Structure_Simulation/Corticothalamic_Brain_Model/Bioinformatics_propofol_circle.py
class brain_model_91 (line 13) | class brain_model_91():
method __init__ (line 15) | def __init__(self, W, D):
method simulation (line 155) | def simulation(self):
FILE: examples/Multiscale_Brain_Structure_Simulation/Corticothalamic_Brain_Model/spectrogram.py
function region_sxx (line 41) | def region_sxx(region):
function global_sxx (line 60) | def global_sxx():
function compare_sxx (line 76) | def compare_sxx():
FILE: examples/Multiscale_Brain_Structure_Simulation/HumanBrain/human_brain.py
class Syn (line 15) | class Syn(nn.Module):
method __init__ (line 16) | def __init__(self, syn, weight, neuron_num, tao_d, tao_r, dt, device):
method forward (line 33) | def forward(self, neuron):
class brain (line 42) | class brain(nn.Module):
method __init__ (line 43) | def __init__(self, syn, weight, neuron_model, p_neuron, dt, device):
method forward (line 52) | def forward(self, inputs):
function brain_region (line 57) | def brain_region(neuron_num):
function neuron_type (line 67) | def neuron_type(neuron_num, ratio, regions):
function syn_within_region (line 72) | def syn_within_region(syn_num, region):
function syn_cross_region (line 84) | def syn_cross_region(weight_matrix, region):
FILE: examples/Multiscale_Brain_Structure_Simulation/HumanBrain/human_multi.py
class MultiCompartmentaEIF (line 19) | class MultiCompartmentaEIF(BaseNode):
method __init__ (line 30) | def __init__(self,
method integral (line 62) | def integral(self,apical_inputs):
method calc_spike (line 71) | def calc_spike(self):
method forward (line 75) | def forward(self, inputs):
class aEIF (line 85) | class aEIF(BaseNode):
method __init__ (line 93) | def __init__(self, p, dt, device, *args, **kwargs):
method integral (line 133) | def integral(self, inputs):
method calc_spike (line 143) | def calc_spike(self):
method forward (line 149) | def forward(self, inputs):
class HHNode (line 157) | class HHNode(BaseNode):
method __init__ (line 170) | def __init__(self, p, dt, device, act_fun=AtanGrad, *args, **kwargs):
method integral (line 203) | def integral(self, inputs):
method calc_spike (line 237) | def calc_spike(self):
method forward (line 240) | def forward(self, inputs):
method requires_activation (line 245) | def requires_activation(self):
class Syn (line 248) | class Syn(nn.Module):
method __init__ (line 249) | def __init__(self, syn, weight, neuron_num, tao_d, tao_r, dt, device):
method forward (line 266) | def forward(self, neuron):
class brain (line 275) | class brain(nn.Module):
method __init__ (line 276) | def __init__(self, syn, weight, neuron_model, p_neuron, dt, device):
method forward (line 287) | def forward(self, inputs):
function brain_region (line 292) | def brain_region(neuron_num):
function neuron_type (line 302) | def neuron_type(neuron_num, ratio, regions):
function syn_within_region (line 307) | def syn_within_region(syn_num, region):
function syn_cross_region (line 319) | def syn_cross_region(weight_matrix, region):
function neuron_delete (line 484) | def neuron_delete(model, rate):
function syn_delete (line 491) | def syn_delete(model, rate):
function syn_strength (line 502) | def syn_strength(model, rate):
FILE: examples/Multiscale_Brain_Structure_Simulation/Human_Brain_Model/NA.py
function histogram_entropy (line 16) | def histogram_entropy(data, bins='auto'):
function hub_degree (line 38) | def hub_degree(df, W_new):
function visual (line 57) | def visual(df, W_new):
FILE: examples/Multiscale_Brain_Structure_Simulation/Human_Brain_Model/main_246.py
class brain_model (line 14) | class brain_model():
method __init__ (line 16) | def __init__(self, W):
method simulation (line 173) | def simulation(self, per):
FILE: examples/Multiscale_Brain_Structure_Simulation/Human_Brain_Model/main_84.py
class brain_model (line 14) | class brain_model():
method __init__ (line 16) | def __init__(self, W):
method simulation (line 173) | def simulation(self, per):
FILE: examples/Multiscale_Brain_Structure_Simulation/Human_Brain_Model/pci.py
function generate_rm (line 15) | def generate_rm(Iraster):
function lempel_ziv_complexity (line 70) | def lempel_ziv_complexity(data):
FILE: examples/Multiscale_Brain_Structure_Simulation/Human_Brain_Model/pci_246.py
function generate_rm (line 15) | def generate_rm(Iraster):
function lempel_ziv_complexity (line 70) | def lempel_ziv_complexity(data):
FILE: examples/Multiscale_Brain_Structure_Simulation/Human_Brain_Model/spectrogram.py
function region_sxx (line 42) | def region_sxx(region):
function global_sxx (line 58) | def global_sxx():
function compare_sxx (line 74) | def compare_sxx():
function fit (line 99) | def fit(xx, yy):
FILE: examples/Multiscale_Brain_Structure_Simulation/Human_PFC_Model/Six_Layer_PFC.py
class six_layer_pfc (line 12) | class six_layer_pfc():
method __init__ (line 18) | def __init__(self):
method picture (line 30) | def picture(self,path=None):
method mex_function (line 48) | def mex_function(self, path=None):
class SynType (line 623) | class SynType:
method __init__ (line 624) | def __init__(self):
class Neuron (line 641) | class Neuron:
method __init__ (line 653) | def __init__(self):
class InpNeuron (line 685) | class InpNeuron:
method __init__ (line 692) | def __init__(self):
class Synapse (line 703) | class Synapse:
method __init__ (line 707) | def __init__(self):
class SynDepr (line 715) | class SynDepr:
method __init__ (line 723) | def __init__(self):
class SynList (line 733) | class SynList:
method __init__ (line 737) | def __init__(self):
FILE: examples/Multiscale_Brain_Structure_Simulation/MacaqueBrain/macaque_brain.py
class Syn (line 14) | class Syn(nn.Module):
method __init__ (line 15) | def __init__(self, syn, weight, neuron_num, tao_d, tao_r, dt, device):
method forward (line 32) | def forward(self, neuron):
class brain (line 41) | class brain(nn.Module):
method __init__ (line 42) | def __init__(self, syn, weight, neuron_model, p_neuron, dt, device):
method forward (line 51) | def forward(self, inputs):
function brain_region (line 56) | def brain_region(neuron_num):
function neuron_type (line 66) | def neuron_type(neuron_num, ratio, regions):
function syn_within_region (line 71) | def syn_within_region(syn_num, region):
function syn_cross_region (line 83) | def syn_cross_region(weight_matrix, region):
FILE: examples/Multiscale_Brain_Structure_Simulation/MouseBrain/mouse_brain.py
class Syn (line 14) | class Syn(nn.Module):
method __init__ (line 15) | def __init__(self, syn, weight, neuron_num, tao_d, tao_r, dt, device):
method forward (line 32) | def forward(self, neuron):
class brain (line 41) | class brain(nn.Module):
method __init__ (line 42) | def __init__(self, syn, weight, neuron_model, p_neuron, dt, device):
method forward (line 51) | def forward(self, inputs):
function brain_region (line 56) | def brain_region(neuron_num):
function neuron_type (line 66) | def neuron_type(neuron_num, ratio, regions):
function syn_within_region (line 71) | def syn_within_region(syn_num, region):
function syn_cross_region (line 83) | def syn_cross_region(weight_matrix, region):
FILE: examples/Perception_and_Learning/Conversion/burst_conversion/CIFAR10_VGG16.py
class VGG16 (line 17) | class VGG16(nn.Module):
method __init__ (line 18) | def __init__(self, relu_max=1): # 1 3e38
method forward (line 43) | def forward(self, input):
function get_cifar10_loader (line 50) | def get_cifar10_loader(batch_size, train_batch=None, num_workers=4, conv...
function train (line 74) | def train(net, train_iter, test_iter, optimizer, scheduler, device, num_...
function evaluate_accuracy (line 117) | def evaluate_accuracy(data_iter, net, device=None, only_onebatch=False):
FILE: examples/Perception_and_Learning/Conversion/burst_conversion/converted_CIFAR10.py
function evaluate_snn (line 44) | def evaluate_snn(test_iter, snn, device=None, duration=50):
FILE: examples/Perception_and_Learning/Conversion/msat_conversion/CIFAR10_VGG16.py
class VGG16 (line 17) | class VGG16(nn.Module):
method __init__ (line 18) | def __init__(self, relu_max=1): # 1 3e38
method forward (line 43) | def forward(self, input):
function get_cifar10_loader (line 50) | def get_cifar10_loader(batch_size, train_batch=None, num_workers=4, conv...
function train (line 74) | def train(net, train_iter, test_iter, optimizer, scheduler, device, num_...
function evaluate_accuracy (line 117) | def evaluate_accuracy(data_iter, net, device=None, only_onebatch=False):
FILE: examples/Perception_and_Learning/Conversion/msat_conversion/converted_CIFAR10.py
function evaluate_snn (line 46) | def evaluate_snn(test_iter, snn, device=None, duration=50):
FILE: examples/Perception_and_Learning/Conversion/msat_conversion/convertor.py
class FolderPath (line 13) | class FolderPath:
class HookScale (line 16) | class HookScale(nn.Module):
method __init__ (line 23) | def __init__(self,
method forward (line 38) | def forward(self, x):
class Hookoutput (line 52) | class Hookoutput(nn.Module):
method __init__ (line 57) | def __init__(self, module):
method forward (line 62) | def forward(self, x):
class Scale (line 68) | class Scale(nn.Module):
method __init__ (line 73) | def __init__(self, scale: float = 1.0):
method forward (line 77) | def forward(self, x):
function reset (line 84) | def reset(self):
class Convertor (line 97) | class Convertor(nn.Module):
method __init__ (line 118) | def __init__(self,
method forward (line 146) | def forward(self, model):
method register_hook (line 157) | def register_hook(model, p=0.99, channelnorm=False, gamma=0.999):
method get_percentile (line 172) | def get_percentile(model, dataloader, device, batch_num=1):
method replace_for_spike (line 184) | def replace_for_spike(model, lipool=True, soft_mode=True, gamma=1, spi...
class SNode (line 208) | class SNode(nn.Module):
method __init__ (line 215) | def __init__(self, soft_mode=False, gamma=5, useDET=False, useDTT=Fals...
method forward (line 256) | def forward(self, x):
method hard_reset (line 317) | def hard_reset(self):
method soft_reset (line 323) | def soft_reset(self):
method reset (line 329) | def reset(self):
FILE: examples/Perception_and_Learning/IllusionPerception/AbuttingGratingIllusion/distortion/abutting_grating_illusion/abutting_grating_distortion.py
function save_image (line 30) | def save_image(image, filename):
function get_mnist_data (line 37) | def get_mnist_data(train = False, batch_size = 100):
function get_silhouette_data (line 56) | def get_silhouette_data(path):
function ag_distort_28 (line 75) | def ag_distort_28(imgs, threshold=0, interval=4, phase=2, direction=(1,0)):
function transform_224 (line 95) | def transform_224(imgs):
function ag_distort_224 (line 100) | def ag_distort_224(imgs, threshold=0, interval=8, phase=4, direction=(1,...
function ag_distort_silhouette (line 123) | def ag_distort_silhouette(imgs, threshold=0.5, interval=2, phase=1, dire...
FILE: examples/Perception_and_Learning/MultisensoryIntegration/code/MultisensoryIntegrationDEMO_AM.py
class AMNet (line 10) | class AMNet(nn.Module):
method __init__ (line 11) | def __init__(self,
method forward (line 28) | def forward(self, x):
function get_concept_dataset_dic_and_AM_initial_weights_lst (line 35) | def get_concept_dataset_dic_and_AM_initial_weights_lst(BBSR_path):
function convert_vec_into_spike_trains (line 76) | def convert_vec_into_spike_trains(each_concept_vec):
function reducing_to_binarycode (line 84) | def reducing_to_binarycode(post_neuron_states_lst, tolerance):
FILE: examples/Perception_and_Learning/MultisensoryIntegration/code/MultisensoryIntegrationDEMO_IM.py
class IMNet (line 12) | class IMNet(nn.Module):
method __init__ (line 13) | def __init__(self,
method forward (line 30) | def forward(self, x):
function get_concept_dataset_dic_and_initial_weights_lst (line 37) | def get_concept_dataset_dic_and_initial_weights_lst(BBSR_path):
function convert_vec_into_spike_trains (line 70) | def convert_vec_into_spike_trains(each_concept_vec):
function reducing_to_binarycode (line 78) | def reducing_to_binarycode(post_neuron_states_lst, tolerance):
FILE: examples/Perception_and_Learning/MultisensoryIntegration/code/measure_and_visualization.py
function get_concept_dataset_dic_and_initial_weights_lst (line 4) | def get_concept_dataset_dic_and_initial_weights_lst(BBSR_path):
function load_binarycode_dic (line 36) | def load_binarycode_dic(filename):
function load_m_dataset_concept_set_lst (line 42) | def load_m_dataset_concept_set_lst(m_dataset_name):
function load_McRae_concept_feature_lst (line 48) | def load_McRae_concept_feature_lst():
function load_CSLB_concept_feature_lst (line 64) | def load_CSLB_concept_feature_lst():
function get_m_dataset_concept_k_similar_concepts_dic (line 82) | def get_m_dataset_concept_k_similar_concepts_dic(m_dataset_name, overlap...
function get_dataset_concept_ME_dic (line 101) | def get_dataset_concept_ME_dic(origin_dataset_dic):
function get_dataset_concept_k_similar_concepts_ranking_dic_dic (line 113) | def get_dataset_concept_k_similar_concepts_ranking_dic_dic (dataset_dic,...
function get_vec_Harmming_similarity (line 131) | def get_vec_Harmming_similarity(concept1_vecstr, concept2_vecstr):
function get_ME_kAR_corr (line 135) | def get_ME_kAR_corr(binarycode_type, concept_ME_dic, m_dataset, k):
function visualize_results (line 175) | def visualize_results(ME_lst, raking_mean_lst, jointplot_file):
FILE: examples/Perception_and_Learning/NeuEvo/auto_augment.py
class ImageNetPolicy (line 14) | class ImageNetPolicy(object):
method __init__ (line 28) | def __init__(self, fillcolor=(128, 128, 128)):
method __call__ (line 60) | def __call__(self, img):
method __repr__ (line 64) | def __repr__(self):
class CIFAR10Policy (line 68) | class CIFAR10Policy(object):
method __init__ (line 82) | def __init__(self, fillcolor=(128, 128, 128)):
method __call__ (line 114) | def __call__(self, img):
method __repr__ (line 118) | def __repr__(self):
class SVHNPolicy (line 122) | class SVHNPolicy(object):
method __init__ (line 136) | def __init__(self, fillcolor=(128, 128, 128)):
method __call__ (line 168) | def __call__(self, img):
method __repr__ (line 172) | def __repr__(self):
class SubPolicy (line 176) | class SubPolicy(object):
method __init__ (line 178) | def __init__(self,
method __call__ (line 269) | def __call__(self, img):
FILE: examples/Perception_and_Learning/NeuEvo/main.py
function _parse_args (line 345) | def _parse_args():
function main (line 362) | def main():
function train_epoch (line 722) | def train_epoch(
function validate (line 857) | def validate(epoch, model, loader, loss_fn, args, amp_autocast=suppress,
FILE: examples/Perception_and_Learning/NeuEvo/separate_loss.py
class MseSeparateLoss (line 10) | class MseSeparateLoss(nn.modules.loss._Loss):
method __init__ (line 12) | def __init__(self, weight=0.1, size_average=None, ignore_index=-100,
method forward (line 19) | def forward(self, input1, target1, input2):
class ConvSeparateLoss (line 26) | class ConvSeparateLoss(nn.modules.loss._Loss):
method __init__ (line 29) | def __init__(self, loss1_fn, weight=0.1, size_average=None, ignore_ind...
method forward (line 36) | def forward(self, input1, target1, input2):
class TriSeparateLoss (line 59) | class TriSeparateLoss(nn.modules.loss._Loss):
method __init__ (line 62) | def __init__(self, loss1_fn, weight=0.1, size_average=None, ignore_ind...
method forward (line 69) | def forward(self, input1, target1, input2):
FILE: examples/Perception_and_Learning/NeuEvo/train.py
class TrainNetwork (line 75) | class TrainNetwork(object):
method __init__ (line 78) | def __init__(self, args):
method _init_log (line 87) | def _init_log(self):
method _init_device (line 100) | def _init_device(self):
method _init_data_queue (line 115) | def _init_data_queue(self):
method _init_model (line 136) | def _init_model(self):
method run (line 195) | def run(self):
method train (line 232) | def train(self):
method infer (line 267) | def infer(self):
FILE: examples/Perception_and_Learning/NeuEvo/train_search.py
function main (line 138) | def main():
function train (line 297) | def train(epoch, train_queue, valid_queue, model, architect, criterion, ...
function infer (line 352) | def infer(valid_queue, model, criterion):
FILE: examples/Perception_and_Learning/NeuEvo/utils.py
class AvgrageMeter (line 21) | class AvgrageMeter(object):
method __init__ (line 23) | def __init__(self):
method reset (line 26) | def reset(self):
method update (line 31) | def update(self, val, n=1):
function accuracy (line 37) | def accuracy(output, target, topk=(1,)):
class Cutout (line 57) | class Cutout(object):
method __init__ (line 58) | def __init__(self, length):
method __call__ (line 61) | def __call__(self, img):
function _data_transforms_cifar (line 79) | def _data_transforms_cifar(args):
function count_parameters_in_MB (line 112) | def count_parameters_in_MB(model):
function save_checkpoint (line 116) | def save_checkpoint(state, is_best, save):
function save (line 124) | def save(model, model_path):
function load (line 128) | def load(model, model_path):
function drop_path (line 132) | def drop_path(x, drop_prob):
function create_exp_dir (line 142) | def create_exp_dir(path, scripts_to_save=None):
function calc_time (line 154) | def calc_time(seconds):
function save_file (line 161) | def save_file(recoder, path='./', back_connection=False):
FILE: examples/Perception_and_Learning/QSNN/main.py
function int2onehot (line 39) | def int2onehot(label, classes, factor):
function train (line 45) | def train(net, epochs, lr):
FILE: examples/Perception_and_Learning/UnsupervisedSTDP/codef.py
class STDPConv (line 46) | class STDPConv(nn.Module):
method __init__ (line 47) | def __init__(self, in_planes, out_planes, kernel_size, stride, padding...
method mem_update (line 65) | def mem_update(self, x, onespike=True): # b,c,h,w
method forward (line 77) | def forward(self, x, T=None, onespike=True):
method reset (line 107) | def reset(self):
method normgrad (line 110) | def normgrad(self, force=False):
method normweight (line 121) | def normweight(self, clip=False):
method getthresh (line 136) | def getthresh(self, scale):
class STDPLinear (line 143) | class STDPLinear(nn.Module):
method __init__ (line 144) | def __init__(self, in_planes, out_planes,
method mem_update (line 161) | def mem_update(self, x, onespike=True): # b,c,h,w
method forward (line 176) | def forward(self, x, T, onespike=True):
method reset (line 204) | def reset(self):
method normgrad (line 207) | def normgrad(self, force=False):
method normweight (line 218) | def normweight(self, clip=False):
method getthresh (line 231) | def getthresh(self, scale):
method updatethresh (line 236) | def updatethresh(self, plus=0.05):
class STDPFlatten (line 243) | class STDPFlatten(nn.Module):
method __init__ (line 244) | def __init__(self, start_dim=0, end_dim=-1):
method forward (line 248) | def forward(self, x, T): # [batch,T,c,w,h]
class STDPMaxPool (line 253) | class STDPMaxPool(nn.Module):
method __init__ (line 254) | def __init__(self, kernel_size, stride, padding, static=True):
method forward (line 259) | def forward(self, x, T): # [batch,T,c,w,h]
class Normliaze (line 274) | class Normliaze(nn.Module):
method __init__ (line 275) | def __init__(self, static=True):
method forward (line 279) | def forward(self, x, T): # [batch,T,c,w,h]
class voting (line 287) | class voting(nn.Module):
method __init__ (line 289) | def __init__(self, shape):
method assign_labels (line 293) | def assign_labels(self, spikes, labels, rates=None, n_labels=10, alpha...
method get_label (line 315) | def get_label(self, spikes):
class Conv_Net (line 333) | class Conv_Net(nn.Module):
method __init__ (line 334) | def __init__(self):
method forward (line 351) | def forward(self, x, inlayer, outlayer, T, onespike=True): # [b,t,w,h]
method normgrad (line 357) | def normgrad(self, layer, force=False):
method normweight (line 360) | def normweight(self, layer, clip=False):
method updatethresh (line 363) | def updatethresh(self, layer, plus=0.05):
method reset (line 366) | def reset(self, layer):
function plot_confusion_matrix (line 374) | def plot_confusion_matrix(cm, classes, normalize=True, title='Test Confu...
FILE: examples/Perception_and_Learning/img_cls/bp/main.py
function _parse_args (line 348) | def _parse_args():
function resnet50d_pretrained (line 366) | def resnet50d_pretrained(*args, **kwargs):
function main (line 373) | def main():
function train_epoch (line 745) | def train_epoch(
function validate (line 895) | def validate(epoch, model, loader, loss_fn, args, amp_autocast=suppress,
FILE: examples/Perception_and_Learning/img_cls/bp/main_backei.py
function train (line 69) | def train(epoch):
function eval (line 98) | def eval(epoch):
function main (line 119) | def main():
FILE: examples/Perception_and_Learning/img_cls/bp/main_simplified.py
function _parse_args (line 189) | def _parse_args():
function main (line 206) | def main():
function train_epoch (line 366) | def train_epoch(
function validate (line 449) | def validate(model, loader, loss_fn, args, log_suffix='', visualize=Fals...
FILE: examples/Perception_and_Learning/img_cls/glsnn/cls_glsnn.py
function train (line 52) | def train(epoch):
function eval (line 81) | def eval(epoch):
function main (line 101) | def main():
FILE: examples/Perception_and_Learning/img_cls/spiking_capsnet/spikingcaps.py
class myLIFnode (line 21) | class myLIFnode(LIFNode):
method __init__ (line 22) | def __init__(self, threshold=0.5, tau=2., *args, **kwargs):
method integral (line 25) | def integral(self, inputs):
class ConvLayer (line 30) | class ConvLayer(nn.Module):
method __init__ (line 31) | def __init__(self, in_channels=1, out_channels=256, kernel_size=9):
method forward (line 35) | def forward(self, x):
class PrimaryCaps (line 39) | class PrimaryCaps(nn.Module):
method __init__ (line 40) | def __init__(self, num_capsules=8, in_channels=256, out_channels=32, k...
method forward (line 47) | def forward(self, x):
class DigitCaps (line 55) | class DigitCaps(nn.Module):
method __init__ (line 56) | def __init__(self, num_capsules=10, num_routes=32 * 6 * 6, in_channels...
method forward (line 69) | def forward(self, x):
class DigitCaps2 (line 78) | class DigitCaps2(nn.Module):
method __init__ (line 79) | def __init__(self, num_capsules=10, num_routes=32 * 6 * 6):
method forward (line 87) | def forward(self, u_hat):
method init_bij (line 92) | def init_bij(self):
class Decoder (line 97) | class Decoder(nn.Module):
method __init__ (line 98) | def __init__(self):
method forward (line 102) | def forward(self, x):
class CapsNet (line 108) | class CapsNet(nn.Module):
method __init__ (line 109) | def __init__(self):
method forward (line 122) | def forward(self, data, time_window=5, train=True):
method init (line 152) | def init(self):
function evaluate (line 159) | def evaluate(test_iter, net, device):
FILE: examples/Perception_and_Learning/img_cls/transfer_for_dvs/GradCAM_visualization.py
function get_proj (line 366) | def get_proj(self):
function event_vis_raw (line 415) | def event_vis_raw(x):
function get_dataloader_ncal (line 449) | def get_dataloader_ncal(step, **kwargs):
function _parse_args (line 486) | def _parse_args():
function main (line 503) | def main():
function event_frame_plot_2d (line 848) | def event_frame_plot_2d(event):
FILE: examples/Perception_and_Learning/img_cls/transfer_for_dvs/datasets.py
class TransferSampler (line 50) | class TransferSampler(torch.utils.data.sampler.Sampler):
method __init__ (line 56) | def __init__(self, indices):
method __iter__ (line 59) | def __iter__(self):
method __len__ (line 62) | def __len__(self):
class Transfer_DataSet (line 65) | class Transfer_DataSet(torchvision.datasets.VisionDataset):
method __init__ (line 66) | def __init__(self, data, label):
method __getitem__ (line 71) | def __getitem__(self, mask):
method __len__ (line 76) | def __len__(self):
class ConvertHSV (line 81) | class ConvertHSV(object):
method __init__ (line 87) | def __init__(self):
method __call__ (line 91) | def __call__(self, img):
function unpack_mix_param (line 103) | def unpack_mix_param(args):
function build_transform (line 116) | def build_transform(is_train, img_size, use_hsv=True):
function build_dataset (line 167) | def build_dataset(is_train, img_size, dataset, path, same_da=False, use_...
class MNISTData (line 199) | class MNISTData(object):
method __init__ (line 204) | def __init__(self,
method get_data_loaders (line 221) | def get_data_loaders(self):
method get_standard_data (line 235) | def get_standard_data(self):
function get_mnist_data (line 246) | def get_mnist_data(batch_size, num_workers=8, same_da=False, **kwargs):
function get_fashion_data (line 292) | def get_fashion_data(batch_size, num_workers=8, same_da=False, **kwargs):
function get_cifar10_data (line 325) | def get_cifar10_data(batch_size, num_workers=8, same_da=False, **kwargs):
function get_cifar100_data (line 351) | def get_cifar100_data(batch_size, num_workers=8, same_data=False, *args,...
function get_transfer_cifar10_data (line 374) | def get_transfer_cifar10_data(batch_size, num_workers=8, same_da=False, ...
function get_combined_cifar10_data (line 399) | def get_combined_cifar10_data(batch_size, num_workers=8, same_da=False, ...
function get_transfer_CALTECH101_data (line 413) | def get_transfer_CALTECH101_data(batch_size, num_workers=8, same_da=Fals...
function get_combined_CALTECH101_data (line 435) | def get_combined_CALTECH101_data(batch_size, num_workers=8, same_da=Fals...
function get_TinyImageNet_data (line 457) | def get_TinyImageNet_data(batch_size, num_workers=8, same_da=False, *arg...
function get_transfer_imnet_data (line 490) | def get_transfer_imnet_data(args, _logger, data_config, num_aug_splits, ...
function get_dvsg_data (line 529) | def get_dvsg_data(batch_size, step, **kwargs):
function get_dvsc10_data (line 629) | def get_dvsc10_data(batch_size, step, dvs_da=False, **kwargs):
function get_transfer_dvsc10_data (line 747) | def get_transfer_dvsc10_data(batch_size, step, dvs_da=False, **kwargs):
function get_NCALTECH101_data (line 837) | def get_NCALTECH101_data(batch_size, step, dvs_da=False, **kwargs):
function get_transfer_NCALTECH101_data (line 974) | def get_transfer_NCALTECH101_data(batch_size, step, dvs_da=False, **kwar...
function get_NCARS_data (line 1067) | def get_NCARS_data(batch_size, step, **kwargs):
function get_nomni_data (line 1165) | def get_nomni_data(batch_size, train_portion=1., **kwargs):
function get_transfer_omni_data (line 1229) | def get_transfer_omni_data(batch_size, train_portion=1., **kwargs):
function get_esimnet_data (line 1259) | def get_esimnet_data(batch_size, step, **kwargs):
function get_CUB2002011_data (line 1359) | def get_CUB2002011_data(batch_size, num_workers=8, same_da=False, *args,...
function get_StanfordCars_data (line 1390) | def get_StanfordCars_data(batch_size, num_workers=8, same_da=False, *arg...
function get_StanfordDogs_data (line 1421) | def get_StanfordDogs_data(batch_size, num_workers=8, same_da=False, *arg...
function get_FGVCAircraft_data (line 1453) | def get_FGVCAircraft_data(batch_size, num_workers=8, same_da=False, *arg...
function get_Flowers102_data (line 1485) | def get_Flowers102_data(batch_size, num_workers=8, same_da=False, *args,...
FILE: examples/Perception_and_Learning/img_cls/transfer_for_dvs/main.py
function _parse_args (line 359) | def _parse_args():
function main (line 376) | def main():
function train_epoch (line 745) | def train_epoch(
function validate (line 944) | def validate(epoch, model, loader, loss_fn, args, amp_autocast=suppress,
FILE: examples/Perception_and_Learning/img_cls/transfer_for_dvs/main_transfer.py
function _parse_args (line 384) | def _parse_args():
function main (line 401) | def main():
function train_epoch (line 860) | def train_epoch(
function validate (line 1211) | def validate(epoch, model, loader, loss_fn, args, amp_autocast=suppress,
FILE: examples/Perception_and_Learning/img_cls/transfer_for_dvs/main_visual_losslandscape.py
function _parse_args (line 402) | def _parse_args():
function main (line 419) | def main():
FILE: examples/Snn_safety/DPSNN/load_data.py
class CustomDataset (line 23) | class CustomDataset(Dataset):
method __init__ (line 27) | def __init__(self, dataset, indices):
method __len__ (line 31) | def __len__(self):
method __getitem__ (line 34) | def __getitem__(self, item):
function load_static_data (line 39) | def load_static_data(data_root, batch_size, dataset):
function load_dvs10_data (line 107) | def load_dvs10_data(batch_size, step, **kwargs):
function load_nmnist_data (line 161) | def load_nmnist_data(batch_size, step, **kwargs):
FILE: examples/Snn_safety/DPSNN/main_dpsnn.py
function train (line 38) | def train(model, device, train_loader, optimizer, epoch, privacy_engine):
function test (line 76) | def test(model, device, test_loader):
function run (line 104) | def run():
FILE: examples/Snn_safety/DPSNN/model.py
class TEP (line 13) | class TEP(nn.Module):
method __init__ (line 14) | def __init__(self, step, channel, device=None, dtype=None):
method forward (line 21) | def forward(self, x):
class BaseConvNet (line 33) | class BaseConvNet(BaseModule, abc.ABC):
method __init__ (line 34) | def __init__(self,
method _create_feature (line 77) | def _create_feature(self):
method _create_fc (line 81) | def _create_fc(self):
method forward (line 84) | def forward(self, inputs):
class LayerWiseConvModule (line 123) | class LayerWiseConvModule(nn.Module):
method __init__ (line 136) | def __init__(self,
method forward (line 164) | def forward(self, x):
class LayerWiseLinearModule (line 177) | class LayerWiseLinearModule(nn.Module):
method __init__ (line 188) | def __init__(self,
method forward (line 217) | def forward(self, x):
class LayWiseConvNet (line 239) | class LayWiseConvNet(BaseConvNet):
method __init__ (line 240) | def __init__(self,
method _create_feature (line 262) | def _create_feature(self):
method _create_fc (line 303) | def _create_fc(self):
function cifar_convnet (line 314) | def cifar_convnet(step,
function dvs_convnet (line 342) | def dvs_convnet(step,
class SimpleSNN (line 372) | class SimpleSNN(BaseModule, abc.ABC):
method __init__ (line 373) | def __init__(self,
method forward (line 400) | def forward(self, inputs):
FILE: examples/Snn_safety/RandHet-SNN/evaluate.py
function get_args (line 28) | def get_args():
function evaluate_attack (line 44) | def evaluate_attack(model, test_loader, args, atk, atk_name, logger):
function main (line 74) | def main():
FILE: examples/Snn_safety/RandHet-SNN/my_node.py
class RHLIFNode (line 4) | class RHLIFNode(BaseNode):
method __init__ (line 23) | def __init__(self, threshold=0.5, tau=0., sigma=1.0, act_fun=AtanGrad,...
method integral (line 36) | def integral(self, inputs):
method calc_spike (line 40) | def calc_spike(self):
method n_reset (line 45) | def n_reset(self):
class RHLIFNode2 (line 52) | class RHLIFNode2(BaseNode):
method __init__ (line 71) | def __init__(self, threshold=0.5, tau=0., sigma=1.0, act_fun=AtanGrad,...
method integral (line 85) | def integral(self, inputs):
method calc_spike (line 91) | def calc_spike(self):
method n_reset (line 96) | def n_reset(self):
FILE: examples/Snn_safety/RandHet-SNN/sew_resnet.py
function sew_function (line 16) | def sew_function(x: torch.Tensor, y: torch.Tensor, cnf: str):
function conv3x3 (line 27) | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
function conv1x1 (line 33) | def conv1x1(in_planes, out_planes, stride=1):
class BasicBlock (line 38) | class BasicBlock(nn.Module):
method __init__ (line 41) | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
method forward (line 63) | def forward(self, x):
method extra_repr (line 81) | def extra_repr(self) -> str:
class Bottleneck (line 85) | class Bottleneck(nn.Module):
method __init__ (line 94) | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
method forward (line 116) | def forward(self, x):
method extra_repr (line 138) | def extra_repr(self) -> str:
class SEWResNet19 (line 142) | class SEWResNet19(BaseModule):
method __init__ (line 143) | def __init__(self, block, layers, num_classes=1000, step=8, encode_typ...
method _make_layer (line 206) | def _make_layer(self, block, planes, blocks, stride=1, dilate=False, c...
method _forward_impl (line 231) | def _forward_impl(self, inputs):
method _forward_once (line 263) | def _forward_once(self, x):
method forward (line 282) | def forward(self, x):
FILE: examples/Snn_safety/RandHet-SNN/train.py
function get_args (line 28) | def get_args():
function main (line 63) | def main():
FILE: examples/Snn_safety/RandHet-SNN/utils.py
function get_norm_stat (line 14) | def get_norm_stat(mean, std):
function clamp (line 23) | def clamp(X, lower_limit, upper_limit):
function normalize_fn (line 27) | def normalize_fn(tensor, mean, std):
class NormalizeByChannelMeanStd (line 35) | class NormalizeByChannelMeanStd(nn.Module):
method __init__ (line 36) | def __init__(self, mean, std):
method forward (line 45) | def forward(self, tensor):
method extra_repr (line 48) | def extra_repr(self):
function get_loaders (line 52) | def get_loaders(dir_, batch_size, dataset='cifar10', worker=4, norm=True):
function evaluate_standard (line 107) | def evaluate_standard(test_loader, model, args):
function orthogonal_retraction (line 124) | def orthogonal_retraction(model, beta=0.002):
FILE: examples/Social_Cognition/FOToM/algorithms/ToM_class.py
class ToM1 (line 7) | class ToM1(object):
method __init__ (line 14) | def __init__(self, tom_base, alg_types, agent_types, num_lm, device, h...
method _agent_tom1_init (line 34) | def _agent_tom1_init(self):
method _get_index1 (line 48) | def _get_index1(self, lst=None, item=''):
method c_function (line 51) | def c_function(self, tom0_actions_q, tom1_actions_q):
method tom1_output (line 70) | def tom1_output(self, agent_i, adv_indx, good_indx, obs_, acs_pre_):
FILE: examples/Social_Cognition/FOToM/algorithms/maddpg.py
class MADDPG (line 17) | class MADDPG(object):
method __init__ (line 18) | def __init__(self, agent_init_params, alg_types, device,
method policies (line 55) | def policies(self):
method target_policies (line 59) | def target_policies(self):
method scale_noise (line 62) | def scale_noise(self, scale):
method reset_noise (line 71) | def reset_noise(self):
method step (line 75) | def step(self, observations, explore=False):
method update (line 87) | def update(self, sample, agent_i, parallel=False, logger=None):
method update_all_targets (line 179) | def update_all_targets(self):
method prep_training (line 189) | def prep_training(self, device='gpu'):
method prep_rollouts (line 216) | def prep_rollouts(self, device='cpu'):
method save (line 229) | def save(self, filename):
method init_from_env (line 239) | def init_from_env(cls, env, device, agent_alg="MADDPG", adversary_alg=...
method init_from_save (line 291) | def init_from_save(cls, filename):
class MADDPG_RNN (line 302) | class MADDPG_RNN(object):
method __init__ (line 306) | def __init__(self, agent_init_params, alg_types,
method _init_agent (line 341) | def _init_agent(self, n_rollout_threads):
method policies (line 347) | def policies(self, len_ep):
method target_policies (line 353) | def target_policies(self, len_ep):
method scale_noise (line 358) | def scale_noise(self, scale):
method reset_noise (line 367) | def reset_noise(self):
method step (line 371) | def step(self, observations, explore=False):
method _compute_rnn (line 383) | def _compute_rnn(self, fn, hidden, inputs, logit):
method update (line 398) | def update(self, sample, agent_i, parallel=False, logger=None):
method update_all_targets (line 481) | def update_all_targets(self):
method prep_training (line 491) | def prep_training(self, device='gpu'):
method prep_rollouts (line 518) | def prep_rollouts(self, device='cpu'):
method save (line 531) | def save(self, filename):
method init_from_env (line 541) | def init_from_env(cls, env, agent_alg="MADDPG", adversary_alg="MADDPG_...
method init_from_save (line 580) | def init_from_save(cls, filename):
FILE: examples/Social_Cognition/FOToM/algorithms/tom11.py
class ToM_decision11 (line 20) | class ToM_decision11(object):
method __init__ (line 22) | def __init__(self, agent_init_params, alg_types, agent_types, num_lm,
method policies (line 87) | def policies(self):
method target_policies (line 91) | def target_policies(self):
method scale_noise (line 94) | def scale_noise(self, scale):
method reset_noise (line 103) | def reset_noise(self):
method _get_index1 (line 107) | def _get_index1(self, lst=None, item=''):
method _agent_tom_init (line 110) | def _agent_tom_init(self):
method step (line 124) | def step(self, observations, actions_pre, explore=False): #simple_tag
method _get_obs (line 195) | def _get_obs(self, observations, action_tom):
method train_tom0 (line 205) | def train_tom0(self, sample, agent_i):
method tom1_infer_other (line 245) | def tom1_infer_other(self, sample):
method tom0_output (line 328) | def tom0_output(self, sample):
method update (line 365) | def update(self, sample, agent_i, parallel=False, logger=None, sample_...
method update_all_targets (line 509) | def update_all_targets(self):
method prep_training (line 519) | def prep_training(self, device='gpu'):
method prep_rollouts (line 563) | def prep_rollouts(self, device='cpu'):
method save (line 576) | def save(self, filename):
method init_from_env (line 587) | def init_from_env(cls, env, config, device, agent_alg, adversary_alg,
method init_from_save (line 647) | def init_from_save(cls, filename):
method get_params (line 661) | def get_params(self):
method load_params (line 671) | def load_params(self, params):
FILE: examples/Social_Cognition/FOToM/common/distributions.py
class Pd (line 10) | class Pd(object):
method flatparam (line 14) | def flatparam(self):
method mode (line 16) | def mode(self):
method logp (line 18) | def logp(self, x):
method kl (line 20) | def kl(self, other):
method entropy (line 22) | def entropy(self):
method sample (line 24) | def sample(self):
class PdType (line 27) | class PdType(object):
method pdclass (line 31) | def pdclass(self):
method pdfromflat (line 33) | def pdfromflat(self, flat):
method param_shape (line 35) | def param_shape(self):
method sample_shape (line 37) | def sample_shape(self):
method sample_dtype (line 39) | def sample_dtype(self):
method param_placeholder (line 42) | def param_placeholder(self, prepend_shape, name=None):
method sample_placeholder (line 44) | def sample_placeholder(self, prepend_shape, name=None):
class CategoricalPdType (line 47) | class CategoricalPdType(PdType):
method __init__ (line 48) | def __init__(self, ncat):
method pdclass (line 50) | def pdclass(self):
method param_shape (line 52) | def param_shape(self):
method sample_shape (line 54) | def sample_shape(self):
method sample_dtype (line 56) | def sample_dtype(self):
class SoftCategoricalPdType (line 59) | class SoftCategoricalPdType(PdType):
method __init__ (line 60) | def __init__(self, ncat):
method pdclass (line 62) | def pdclass(self):
method param_shape (line 64) | def param_shape(self):
method sample_shape (line 66) | def sample_shape(self):
method sample_dtype (line 68) | def sample_dtype(self):
class MultiCategoricalPdType (line 71) | class MultiCategoricalPdType(PdType):
method __init__ (line 72) | def __init__(self, low, high):
method pdclass (line 76) | def pdclass(self):
method pdfromflat (line 78) | def pdfromflat(self, flat):
method param_shape (line 80) | def param_shape(self):
method sample_shape (line 82) | def sample_shape(self):
method sample_dtype (line 84) | def sample_dtype(self):
class SoftMultiCategoricalPdType (line 87) | class SoftMultiCategoricalPdType(PdType):
method __init__ (line 88) | def __init__(self, low, high):
method pdclass (line 92) | def pdclass(self):
method pdfromflat (line 94) | def pdfromflat(self, flat):
method param_shape (line 96) | def param_shape(self):
method sample_shape (line 98) | def sample_shape(self):
method sample_dtype (line 100) | def sample_dtype(self):
class DiagGaussianPdType (line 103) | class DiagGaussianPdType(PdType):
method __init__ (line 104) | def __init__(self, size):
method pdclass (line 106) | def pdclass(self):
method param_shape (line 108) | def param_shape(self):
method sample_shape (line 110) | def sample_shape(self):
method sample_dtype (line 112) | def sample_dtype(self):
class BernoulliPdType (line 115) | class BernoulliPdType(PdType):
method __init__ (line 116) | def __init__(self, size):
method pdclass (line 118) | def pdclass(self):
method param_shape (line 120) | def param_shape(self):
method sample_shape (line 122) | def sample_shape(self):
method sample_dtype (line 124) | def sample_dtype(self):
class CategoricalPd (line 150) | class CategoricalPd(Pd):
method __init__ (line 151) | def __init__(self, logits):
method flatparam (line 153) | def flatparam(self):
method mode (line 155) | def mode(self):
method logp (line 157) | def logp(self, x):
method kl (line 159) | def kl(self, other):
method entropy (line 168) | def entropy(self):
method sample (line 174) | def sample(self):
method fromflat (line 178) | def fromflat(cls, flat):
class SoftCategoricalPd (line 181) | class SoftCategoricalPd(Pd):
method __init__ (line 182) | def __init__(self, logits):
method flatparam (line 184) | def flatparam(self):
method mode (line 186) | def mode(self):
method logp (line 188) | def logp(self, x):
method kl (line 190) | def kl(self, other):
method entropy (line 199) | def entropy(self):
method sample (line 205) | def sample(self):
method fromflat (line 209) | def fromflat(cls, flat):
class MultiCategoricalPd (line 212) | class MultiCategoricalPd(Pd):
method __init__ (line 213) | def __init__(self, low, high, flat):
method flatparam (line 217) | def flatparam(self):
method mode (line 219) | def mode(self):
method logp (line 221) | def logp(self, x):
method kl (line 223) | def kl(self, other):
method entropy (line 227) | def entropy(self):
method sample (line 229) | def sample(self):
method fromflat (line 232) | def fromflat(cls, flat):
class SoftMultiCategoricalPd (line 235) | class SoftMultiCategoricalPd(Pd): # doesn't work yet
method __init__ (line 236) | def __init__(self, low, high, flat):
method flatparam (line 240) | def flatparam(self):
method mode (line 242) | def mode(self):
method logp (line 247) | def logp(self, x):
method kl (line 249) | def kl(self, other):
method entropy (line 253) | def entropy(self):
method sample (line 255) | def sample(self):
method fromflat (line 261) | def fromflat(cls, flat):
class DiagGaussianPd (line 264) | class DiagGaussianPd(Pd):
method __init__ (line 265) | def __init__(self, flat):
method flatparam (line 271) | def flatparam(self):
method mode (line 273) | def mode(self):
method logp (line 275) | def logp(self, x):
method kl (line 279) | def kl(self, other):
method entropy (line 282) | def entropy(self):
method sample (line 284) | def sample(self):
method fromflat (line 287) | def fromflat(cls, flat):
class BernoulliPd (line 290) | class BernoulliPd(Pd):
method __init__ (line 291) | def __init__(self, logits):
method flatparam (line 294) | def flatparam(self):
method mode (line 296) | def mode(self):
method logp (line 298) | def logp(self, x):
method kl (line 300) | def kl(self, other):
method entropy (line 302) | def entropy(self):
method sample (line 304) | def sample(self):
method fromflat (line 309) | def fromflat(cls, flat):
function make_pdtype (line 312) | def make_pdtype(ac_space):
function shape_el (line 328) | def shape_el(v, i):
FILE: examples/Social_Cognition/FOToM/common/tile_images.py
function tile_images (line 3) | def tile_images(img_nhwc):
FILE: examples/Social_Cognition/FOToM/common/vec_env/vec_env.py
class AlreadySteppingError (line 7) | class AlreadySteppingError(Exception):
method __init__ (line 13) | def __init__(self):
class NotSteppingError (line 18) | class NotSteppingError(Exception):
method __init__ (line 24) | def __init__(self):
class VecEnv (line 29) | class VecEnv(ABC):
method __init__ (line 43) | def __init__(self, num_envs, observation_space, action_space):
method reset (line 49) | def reset(self):
method step_async (line 61) | def step_async(self, actions):
method step_wait (line 73) | def step_wait(self):
method close_extras (line 86) | def close_extras(self):
method close (line 93) | def close(self):
method step (line 101) | def step(self, actions):
method render (line 110) | def render(self, mode='human'):
method get_images (line 121) | def get_images(self):
method unwrapped (line 128) | def unwrapped(self):
method get_viewer (line 134) | def get_viewer(self):
class VecEnvWrapper (line 140) | class VecEnvWrapper(VecEnv):
method __init__ (line 146) | def __init__(self, venv, observation_space=None, action_space=None):
method step_async (line 152) | def step_async(self, actions):
method reset (line 156) | def reset(self):
method step_wait (line 160) | def step_wait(self):
method close (line 163) | def close(self):
method render (line 166) | def render(self, mode='human'):
method get_images (line 169) | def get_images(self):
method __getattr__ (line 172) | def __getattr__(self, name):
class VecEnvObservationWrapper (line 177) | class VecEnvObservationWrapper(VecEnvWrapper):
method process (line 179) | def process(self, obs):
method reset (line 182) | def reset(self):
method step_wait (line 186) | def step_wait(self):
class CloudpickleWrapper (line 190) | class CloudpickleWrapper(object):
method __init__ (line 195) | def __init__(self, x):
method __getstate__ (line 198) | def __getstate__(self):
method __setstate__ (line 202) | def __setstate__(self, ob):
function clear_mpi_env_vars (line 208) | def clear_mpi_env_vars():
FILE: examples/Social_Cognition/FOToM/evaluate.py
function display_frames_as_gif (line 15) | def display_frames_as_gif(frames):
function make_parallel_env (line 21) | def make_parallel_env(env_id, n_rollout_threads, discrete_action, num_go...
function run (line 34) | def run(config):
FILE: examples/Social_Cognition/FOToM/main.py
function get_common_args (line 22) | def get_common_args():
function make_parallel_env (line 76) | def make_parallel_env(env_id, n_rollout_threads, seed, discrete_action, ...
function run (line 89) | def run(config):
FILE: examples/Social_Cognition/FOToM/multiagent/core.py
class EntityState (line 4) | class EntityState(object):
method __init__ (line 5) | def __init__(self):
class AgentState (line 12) | class AgentState(EntityState):
method __init__ (line 13) | def __init__(self):
class Action (line 19) | class Action(object):
method __init__ (line 20) | def __init__(self):
class Entity (line 27) | class Entity(object):
method __init__ (line 28) | def __init__(self):
method mass (line 50) | def mass(self):
class Landmark (line 54) | class Landmark(Entity):
method __init__ (line 55) | def __init__(self):
class Agent (line 59) | class Agent(Entity):
method __init__ (line 60) | def __init__(self):
class World (line 82) | class World(object):
method __init__ (line 83) | def __init__(self):
method entities (line 103) | def entities(self):
method policy_agents (line 108) | def policy_agents(self):
method scripted_agents (line 113) | def scripted_agents(self):
method step (line 117) | def step(self):
method apply_action_force (line 134) | def apply_action_force(self, p_force):
method apply_environment_force (line 143) | def apply_environment_force(self, p_force):
method integrate_state (line 158) | def integrate_state(self, p_force):
method update_agent_state (line 171) | def update_agent_state(self, agent):
method get_collision_force (line 180) | def get_collision_force(self, entity_a, entity_b):
FILE: examples/Social_Cognition/FOToM/multiagent/environment.py
class MultiAgentEnv (line 9) | class MultiAgentEnv(gym.Env):
method __init__ (line 14) | def __init__(self, world, reset_callback=None, reward_callback=None,
method step (line 80) | def step(self, action_n):
method reset (line 106) | def reset(self):
method _get_info (line 119) | def _get_info(self, agent):
method _get_obs (line 125) | def _get_obs(self, agent):
method _get_done (line 132) | def _get_done(self, agent):
method _get_reward (line 138) | def _get_reward(self, agent):
method _set_action (line 144) | def _set_action(self, action, agent, action_space, time=None):
method _reset_render (line 195) | def _reset_render(self):
method render (line 200) | def render(self, mode='human'):
method _make_receptor_locations (line 266) | def _make_receptor_locations(self, agent):
class BatchMultiAgentEnv (line 288) | class BatchMultiAgentEnv(gym.Env):
method __init__ (line 294) | def __init__(self, env_batch):
method n (line 298) | def n(self):
method action_space (line 302) | def action_space(self):
method observation_space (line 306) | def observation_space(self):
method step (line 309) | def step(self, action_n, time):
method reset (line 324) | def reset(self):
method render (line 331) | def render(self, mode='human', close=True):
FILE: examples/Social_Cognition/FOToM/multiagent/multi_discrete.py
class MultiDiscrete (line 9) | class MultiDiscrete(gym.Space):
method __init__ (line 25) | def __init__(self, array_of_param_array):
method sample (line 30) | def sample(self):
method contains (line 36) | def contains(self, x):
method shape (line 40) | def shape(self):
method __repr__ (line 42) | def __repr__(self):
method __eq__ (line 44) | def __eq__(self, other):
FILE: examples/Social_Cognition/FOToM/multiagent/policy.py
class Policy (line 5) | class Policy(object):
method __init__ (line 6) | def __init__(self):
method action (line 8) | def action(self, obs):
class InteractivePolicy (line 13) | class InteractivePolicy(Policy):
method __init__ (line 14) | def __init__(self, env, agent_index):
method action (line 24) | def action(self, obs):
method key_press (line 43) | def key_press(self, k, mod):
method key_release (line 48) | def key_release(self, k, mod):
FILE: examples/Social_Cognition/FOToM/multiagent/rendering.py
function get_display (line 33) | def get_display(spec):
class Viewer (line 46) | class Viewer(object):
method __init__ (line 47) | def __init__(self, width, height, display=None):
method close (line 67) | def close(self):
method window_closed_by_user (line 70) | def window_closed_by_user(self):
method set_bounds (line 73) | def set_bounds(self, left, right, bottom, top):
method add_geom (line 81) | def add_geom(self, geom):
method add_onetime (line 84) | def add_onetime(self, geom):
method render (line 87) | def render(self, return_rgb_array=False):
method draw_circle (line 116) | def draw_circle(self, radius=10, res=30, filled=True, **attrs):
method draw_polygon (line 122) | def draw_polygon(self, v, filled=True, **attrs):
method draw_polyline (line 128) | def draw_polyline(self, v, **attrs):
method draw_line (line 134) | def draw_line(self, start, end, **attrs):
method get_array (line 140) | def get_array(self):
function _add_attrs (line 148) | def _add_attrs(geom, attrs):
class Geom (line 154) | class Geom(object):
method __init__ (line 155) | def __init__(self):
method render (line 158) | def render(self):
method render1 (line 164) | def render1(self):
method add_attr (line 166) | def add_attr(self, attr):
method set_color (line 168) | def set_color(self, r, g, b, alpha=1):
class Attr (line 171) | class Attr(object):
method enable (line 172) | def enable(self):
method disable (line 174) | def disable(self):
class Transform (line 177) | class Transform(Attr):
method __init__ (line 178) | def __init__(self, translation=(0.0, 0.0), rotation=0.0, scale=(1,1)):
method enable (line 182) | def enable(self):
method disable (line 187) | def disable(self):
method set_translation (line 189) | def set_translation(self, newx, newy):
method set_rotation (line 191) | def set_rotation(self, new):
method set_scale (line 193) | def set_scale(self, newx, newy):
class Color (line 196) | class Color(Attr):
method __init__ (line 197) | def __init__(self, vec4):
method enable (line 199) | def enable(self):
class LineStyle (line 202) | class LineStyle(Attr):
method __init__ (line 203) | def __init__(self, style):
method enable (line 205) | def enable(self):
method disable (line 208) | def disable(self):
class LineWidth (line 211) | class LineWidth(Attr):
method __init__ (line 212) | def __init__(self, stroke):
method enable (line 214) | def enable(self):
class Point (line 217) | class Point(Geom):
method __init__ (line 218) | def __init__(self):
method render1 (line 220) | def render1(self):
class FilledPolygon (line 225) | class FilledPolygon(Geom):
method __init__ (line 226) | def __init__(self, v):
method render1 (line 229) | def render1(self):
function make_circle (line 244) | def make_circle(radius=10, res=30, filled=True):
function make_polygon (line 254) | def make_polygon(v, filled=True):
function make_polyline (line 258) | def make_polyline(v):
function make_capsule (line 261) | def make_capsule(length, width):
class Compound (line 270) | class Compound(Geom):
method __init__ (line 271) | def __init__(self, gs):
method render1 (line 276) | def render1(self):
class PolyLine (line 280) | class PolyLine(Geom):
method __init__ (line 281) | def __init__(self, v, close):
method render1 (line 287) | def render1(self):
method set_linewidth (line 292) | def set_linewidth(self, x):
class Line (line 295) | class Line(Geom):
method __init__ (line 296) | def __init__(self, start=(0.0, 0.0), end=(0.0, 0.0)):
method render1 (line 303) | def render1(self):
class Image (line 309) | class Image(Geom):
method __init__ (line 310) | def __init__(self, fname, width, height):
method render1 (line 317) | def render1(self):
class SimpleImageViewer (line 322) | class SimpleImageViewer(object):
method __init__ (line 323) | def __init__(self, display=None):
method imshow (line 327) | def imshow(self, arr):
method close (line 341) | def close(self):
method __del__ (line 345) | def __del__(self):
FILE: examples/Social_Cognition/FOToM/multiagent/scenario.py
class BaseScenario (line 4) | class BaseScenario(object):
method make_world (line 6) | def make_world(self):
method reset_world (line 9) | def reset_world(self, world):
FILE: examples/Social_Cognition/FOToM/multiagent/scenarios/__init__.py
function load (line 5) | def load(name):
FILE: examples/Social_Cognition/FOToM/multiagent/scenarios/hetero_spread.py
class Scenario (line 6) | class Scenario(BaseScenario):
method make_world (line 7) | def make_world(self, num_good_agents=2, num_adversaries=0):
method reset_world (line 46) | def reset_world(self, world):
method benchmark_data (line 67) | def benchmark_data(self, agent, world):
method is_collision (line 86) | def is_collision(self, agent1, agent2):
method reward (line 92) | def reward(self, agent, world):
method observation (line 126) | def observation(self, agent, world):
FILE: examples/Social_Cognition/FOToM/multiagent/scenarios/simple.py
class Scenario (line 5) | class Scenario(BaseScenario):
method make_world (line 6) | def make_world(self):
method reset_world (line 24) | def reset_world(self, world):
method reward (line 41) | def reward(self, agent, world):
method observation (line 45) | def observation(self, agent, world):
FILE: examples/Social_Cognition/FOToM/multiagent/scenarios/simple_adversary.py
class Scenario (line 6) | class Scenario(BaseScenario):
method make_world (line 8) | def make_world(self, num_good_agents=2, num_adversaries=1):
method reset_world (line 37) | def reset_world(self, world):
method benchmark_data (line 59) | def benchmark_data(self, agent, world):
method good_agents (line 71) | def good_agents(self, world):
method adversaries (line 75) | def adversaries(self, world):
method reward (line 78) | def reward(self, agent, world):
method agent_reward (line 82) | def agent_reward(self, agent, world):
method adversary_reward (line 111) | def adversary_reward(self, agent, world):
method observation (line 123) | def observation(self, agent, world):
FILE: examples/Social_Cognition/FOToM/multiagent/scenarios/simple_crypto.py
class CryptoAgent (line 14) | class CryptoAgent(Agent):
method __init__ (line 15) | def __init__(self):
class Scenario (line 19) | class Scenario(BaseScenario):
method make_world (line 21) | def make_world(self):
method reset_world (line 47) | def reset_world(self, world):
method benchmark_data (line 78) | def benchmark_data(self, agent, world):
method good_listeners (line 83) | def good_listeners(self, world):
method good_agents (line 87) | def good_agents(self, world):
method adversaries (line 91) | def adversaries(self, world):
method reward (line 94) | def reward(self, agent, world):
method agent_reward (line 97) | def agent_reward(self, agent, world):
method adversary_reward (line 116) | def adversary_reward(self, agent, world):
method observation (line 124) | def observation(self, agent, world):
FILE: examples/Social_Cognition/FOToM/multiagent/scenarios/simple_push.py
class Scenario (line 5) | class Scenario(BaseScenario):
method make_world (line 6) | def make_world(self, num_good_agents=2, num_adversaries=2):
method reset_world (line 33) | def reset_world(self, world):
method reward (line 58) | def reward(self, agent, world):
method good_agents (line 62) | def good_agents(self, world):
method adversaries (line 66) | def adversaries(self, world):
method is_collision (line 69) | def is_collision(self, agent1, agent2):
method agent_reward (line 75) | def agent_reward(self, agent, world):
method adversary_reward (line 104) | def adversary_reward(self, agent, world):
method observation (line 130) | def observation(self, agent, world):
FILE: examples/Social_Cognition/FOToM/multiagent/scenarios/simple_reference.py
class Scenario (line 5) | class Scenario(BaseScenario):
method make_world (line 6) | def make_world(self, num_good_agents=2, num_adversaries=0):
method reset_world (line 26) | def reset_world(self, world):
method reward (line 55) | def reward(self, agent, world):
method observation (line 61) | def observation(self, agent, world):
FILE: examples/Social_Cognition/FOToM/multiagent/scenarios/simple_speaker_listener.py
class Scenario (line 5) | class Scenario(BaseScenario):
method make_world (line 6) | def make_world(self):
method reset_world (line 33) | def reset_world(self, world):
method benchmark_data (line 59) | def benchmark_data(self, agent, world):
method reward (line 63) | def reward(self, agent, world):
method observation (line 69) | def observation(self, agent, world):
FILE: examples/Social_Cognition/FOToM/multiagent/scenarios/simple_spread.py
class Scenario (line 6) | class Scenario(BaseScenario):
method make_world (line 7) | def make_world(self, num_good_agents=2, num_adversaries=2):
method reset_world (line 31) | def reset_world(self, world):
method benchmark_data (line 47) | def benchmark_data(self, agent, world):
method is_collision (line 66) | def is_collision(self, agent1, agent2):
method reward (line 72) | def reward(self, agent, world):
method observation (line 106) | def observation(self, agent, world):
FILE: examples/Social_Cognition/FOToM/multiagent/scenarios/simple_tag.py
class Scenario (line 6) | class Scenario(BaseScenario):
method make_world (line 7) | def make_world(self, num_good_agents=1, num_adversaries=3):
method reset_world (line 39) | def reset_world(self, world):
method benchmark_data (line 57) | def benchmark_data(self, agent, world):
method is_collision (line 69) | def is_collision(self, agent1, agent2):
method good_agents (line 76) | def good_agents(self, world):
method adversaries (line 80) | def adversaries(self, world):
method reward (line 84) | def reward(self, agent, world):
method agent_reward (line 89) | def agent_reward(self, agent, world):
method adversary_reward (line 115) | def adversary_reward(self, agent, world):
method observation (line 131) | def observation(self, agent, world):
FILE: examples/Social_Cognition/FOToM/multiagent/scenarios/simple_world_comm.py
class Scenario (line 6) | class Scenario(BaseScenario):
method make_world (line 7) | def make_world(self, num_good_agents=2, num_adversaries=4):
method set_boundaries (line 59) | def set_boundaries(self, world):
method reset_world (line 88) | def reset_world(self, world):
method benchmark_data (line 115) | def benchmark_data(self, agent, world):
method is_collision (line 126) | def is_collision(self, agent1, agent2):
method good_agents (line 134) | def good_agents(self, world):
method adversaries (line 138) | def adversaries(self, world):
method reward (line 142) | def reward(self, agent, world):
method outside_boundary (line 148) | def outside_boundary(self, agent):
method agent_reward (line 155) | def agent_reward(self, agent, world):
method adversary_reward (line 185) | def adversary_reward(self, agent, world):
method observation2 (line 201) | def observation2(self, agent, world):
method observation (line 224) | def observation(self, agent, world):
FILE: examples/Social_Cognition/FOToM/utils/agents.py
class DDPGAgent (line 10) | class DDPGAgent(object):
method __init__ (line 15) | def __init__(self, num_in_pol, num_out_pol, num_in_critic, hidden_dim=64,
method reset_noise (line 47) | def reset_noise(self):
method scale_noise (line 51) | def scale_noise(self, scale):
method step (line 57) | def step(self, obs, explore=False):
method get_params (line 89) | def get_params(self):
method load_params (line 97) | def load_params(self, params):
class DDPGAgent_RNN (line 105) | class DDPGAgent_RNN(object):
method __init__ (line 110) | def __init__(self, num_in_pol, num_out_pol, num_in_critic, hidden_dim=64,
method reset_noise (line 150) | def reset_noise(self):
method scale_noise (line 154) | def scale_noise(self, scale):
method step (line 160) | def step(self, obs, explore=False):
method get_params (line 183) | def get_params(self):
method load_params (line 191) | def load_params(self, params):
method init_hidden (line 199) | def init_hidden(self, len_ep, policy_hidden=False, policy_target_hidde...
class DDPGAgent_SNN (line 211) | class DDPGAgent_SNN(object):
method __init__ (line 216) | def __init__(self, num_in_pol, num_out_pol, num_in_critic, output_styl...
method reset_noise (line 246) | def reset_noise(self):
method scale_noise (line 250) | def scale_noise(self, scale):
method step (line 256) | def step(self, obs, explore=False):
method get_params (line 297) | def get_params(self):
method load_params (line 305) | def load_params(self, params):
class DDPGAgent_ToM (line 313) | class DDPGAgent_ToM(object):
method __init__ (line 318) | def __init__(self, num_in_pol, num_out_pol, num_in_critic, num_in_mle,...
method reset_noise (line 366) | def reset_noise(self):
method scale_noise (line 370) | def scale_noise(self, scale):
method step (line 376) | def step(self, obs, explore=False):
method get_params (line 411) | def get_params(self):
method load_params (line 424) | def load_params(self, params):
class rDDPGAgent_ToM (line 435) | class rDDPGAgent_ToM(object):
method __init__ (line 440) | def __init__(self, num_in_pol, num_out_pol, num_in_critic, num_in_mle,...
method reset_noise (line 485) | def reset_noise(self):
method scale_noise (line 489) | def scale_noise(self, scale):
method step (line 495) | def step(self, obs, explore=False):
method get_params (line 530) | def get_params(self):
method load_params (line 543) | def load_params(self, params):
method init_hidden (line 554) | def init_hidden(self, len_ep, policy_hidden=False, policy_target_hidde...
class lDDPGAgent (line 566) | class lDDPGAgent(object):
method __init__ (line 571) | def __init__(self, num_in_pol, num_out_pol, num_in_critic, hidden_dim=64,
method reset_noise (line 603) | def reset_noise(self):
method scale_noise (line 607) | def scale_noise(self, scale):
method step (line 613) | def step(self, obs, explore=False):
method get_params (line 645) | def get_params(self):
method load_params (line 653) | def load_params(self, params):
FILE: examples/Social_Cognition/FOToM/utils/buffer.py
class ReplayBuffer (line 6) | class ReplayBuffer(object):
method __init__ (line 10) | def __init__(self, max_steps, num_agents, obs_dims, ac_dims, device):
method __len__ (line 37) | def __len__(self):
method push (line 40) | def push(self, observations, actions, rewards, next_observations, dones):
method sample (line 72) | def sample(self, N, to_gpu=False, norm_rews=True):
method get_average_rewards (line 92) | def get_average_rewards(self, N):
class ReplayBuffer_pre (line 99) | class ReplayBuffer_pre(object):
method __init__ (line 103) | def __init__(self, max_steps, num_agents, obs_dims, ac_dims, device):
method __len__ (line 132) | def __len__(self):
method push (line 135) | def push(self, actions_pre, observations, actions, rewards, next_obser...
method sample (line 170) | def sample(self, N, to_gpu=False, norm_rews=True):
method get_average_rewards (line 194) | def get_average_rewards(self, N):
class ReplayBuffer_RNN (line 202) | class ReplayBuffer_RNN(object):
method __init__ (line 206) | def __init__(self, max_steps, num_agents, obs_dims, ac_dims, ep_dims, ...
method __len__ (line 234) | def __len__(self):
method push (line 237) | def push(self, observations_ep, actions_ep, rewards_ep, next_observati...
method sample (line 278) | def sample(self, N, to_gpu=False, norm_rews=True):
method get_average_rewards (line 298) | def get_average_rewards(self, N):
FILE: examples/Social_Cognition/FOToM/utils/env_wrappers.py
function worker (line 9) | def worker(remote, parent_remote, env_fn_wrapper):
class SubprocVecEnv (line 42) | class SubprocVecEnv(VecEnv):
method __init__ (line 43) | def __init__(self, env_fns, spaces=None):
method step_async (line 67) | def step_async(self, actions):
method step_wait (line 72) | def step_wait(self):
method reset (line 78) | def reset(self):
method reset_task (line 83) | def reset_task(self):
method close (line 88) | def close(self):
class DummyVecEnv (line 101) | class DummyVecEnv(VecEnv):
method __init__ (line 102) | def __init__(self, env_fns):
method step_async (line 114) | def step_async(self, actions):
method step_wait (line 117) | def step_wait(self):
method reset (line 128) | def reset(self):
method close (line 132) | def close(self):
FILE: examples/Social_Cognition/FOToM/utils/make_env.py
function make_env (line 15) | def make_env(scenario_name, num_good_agents, num_adversaries, benchmark=...
FILE: examples/Social_Cognition/FOToM/utils/misc.py
function soft_update (line 9) | def soft_update(target, source, tau):
function hard_update (line 22) | def hard_update(target, source):
function average_gradients (line 33) | def average_gradients(model):
function init_processes (line 41) | def init_processes(rank, size, fn, backend='gloo'):
function onehot_from_logits (line 48) | def onehot_from_logits(logits, eps=0.0):
function sample_gumbel (line 65) | def sample_gumbel(shape, eps=1e-20, tens_type=torch.FloatTensor):
function gumbel_softmax_sample (line 71) | def gumbel_softmax_sample(logits, temperature):
function gumbel_softmax (line 77) | def gumbel_softmax(logits, temperature=1.0, hard=False):
FILE: examples/Social_Cognition/FOToM/utils/multiprocessing.py
function _flatten_list (line 10) | def _flatten_list(l):
function worker (line 18) | def worker(remote, parent_remote, env_fn_wrapper):
class VecEnv (line 58) | class VecEnv(object):
method __init__ (line 68) | def __init__(self, num_envs, observation_space, action_space):
method observe (line 73) | def observe(self, agent):
method reset (line 76) | def reset(self):
method step_async (line 86) | def step_async(self, actions):
method step_wait (line 96) | def step_wait(self):
method close (line 108) | def close(self):
method step (line 114) | def step(self, actions):
method render (line 118) | def render(self, mode='human'):
method get_images (line 130) | def get_images(self):
method get_viewer (line 136) | def get_viewer(self):
method tile_images (line 142) | def tile_images(self, img_nhwc):
class CloudpickleWrapper (line 162) | class CloudpickleWrapper(object):
method __init__ (line 167) | def __init__(self, x):
method __getstate__ (line 170) | def __getstate__(self):
method __setstate__ (line 174) | def __setstate__(self, ob):
class SubprocVecEnv (line 179) | class SubprocVecEnv(VecEnv):
method __init__ (line 180) | def __init__(self, env_fns, spaces=None):
method step_async (line 203) | def step_async(self, actions):
method step_wait (line 208) | def step_wait(self):
method step_wait_2 (line 214) | def step_wait_2(self):
method step_wait_3 (line 220) | def step_wait_3(self):
method reset (line 226) | def reset(self):
method agents (line 231) | def agents(self):
method world (line 236) | def world(self):
method reset_task (line 241) | def reset_task(self):
method spec (line 246) | def spec(self):
method get_images (line 251) | def get_images(self):
method observe (line 259) | def observe(self, agent):
method close (line 267) | def close(self):
method __len__ (line 279) | def __len__(self):
function _flatten_list (line 282) | def _flatten_list(l):
class DummyVecEnv (line 289) | class DummyVecEnv(VecEnv):
method __init__ (line 290) | def __init__(self, env_fns):
method step_async (line 302) | def step_async(self, actions):
method step_wait (line 305) | def step_wait(self):
method reset (line 316) | def reset(self):
method close (line 320) | def close(self):
FILE: examples/Social_Cognition/FOToM/utils/networks.py
class MLPNetwork (line 6) | class MLPNetwork(nn.Module):
method __init__ (line 10) | def __init__(self, input_dim, out_dim, hidden_dim=64, nonlin=F.relu,
method forward (line 39) | def forward(self, X):
class RNN (line 51) | class RNN(nn.Module):
method __init__ (line 53) | def __init__(self, input_dim, out_dim, hidden_dim=64, nonlin=F.relu,
method forward (line 73) | def forward(self, obs, hidden_state):
class BCNoSpikingLIFNode (line 81) | class BCNoSpikingLIFNode(LIFNode):
method __init__ (line 82) | def __init__(self, *args, **kwargs):
method forward (line 85) | def forward(self, dv: torch.Tensor):
class SNNNetwork (line 89) | class SNNNetwork(nn.Module):
method __init__ (line 93) | def __init__(self, input_dim, out_dim, hidden_dim=64, node=LIFNode, ti...
method reset (line 127) | def reset(self):
method forward (line 132) | def forward(self, X):
class LSTMClassifier (line 153) | class LSTMClassifier(nn.Module):
method __init__ (line 154) | def __init__(self, input_size, output_size, hidden_size=256):
method forward (line 159) | def forward(self, x):
FILE: examples/Social_Cognition/FOToM/utils/noise.py
class OUNoise (line 5) | class OUNoise:
method __init__ (line 6) | def __init__(self, action_dimension, scale=0.1, mu=0, theta=0.15, sigm...
method reset (line 15) | def reset(self):
method noise (line 18) | def noise(self):
FILE: examples/Social_Cognition/Intention_Prediction/Intention_Prediction.py
class CustomLinear (line 25) | class CustomLinear(nn.Module):
method __init__ (line 26) | def __init__(self, weight,mask=None):
method forward (line 31) | def forward(self, x: torch.Tensor):
method update (line 36) | def update(self, dw):
class DLPFCNet (line 43) | class DLPFCNet(nn.Module):
method __init__ (line 44) | def __init__(self,connection):
method forward (line 57) | def forward(self, input):
method reset (line 73) | def reset(self):
method UpdateWeight (line 79) | def UpdateWeight(self, i, W):
class OFCNet (line 83) | class OFCNet(nn.Module):
method __init__ (line 84) | def __init__(self,connection):
method forward (line 105) | def forward(self, Input_Tha, Input_SNc, Reward):
method reset (line 120) | def reset(self):
class BGNet (line 127) | class BGNet(nn.Module):
method __init__ (line 128) | def __init__(self,connection):
method forward (line 144) | def forward(self, input1, input2, input3):
method reset (line 154) | def reset(self):
method UpdateWeight (line 159) | def UpdateWeight(self, i, W):
function STDP (line 163) | def STDP(Pre_mat, Post_mat, W):
FILE: examples/Social_Cognition/MAToM-SNN/MPE/agents/agents.py
class DDPGAgent (line 10) | class DDPGAgent(object):
method __init__ (line 15) | def __init__(self, num_in_pol, num_out_pol, num_in_critic, hidden_dim=64,
method reset_noise (line 47) | def reset_noise(self):
method scale_noise (line 51) | def scale_noise(self, scale):
method step (line 57) | def step(self, obs, explore=False):
method get_params (line 89) | def get_params(self):
method load_params (line 97) | def load_params(self, params):
class DDPGAgent_RNN (line 105) | class DDPGAgent_RNN(object):
method __init__ (line 110) | def __init__(self, num_in_pol, num_out_pol, num_in_critic, hidden_dim=64,
method reset_noise (line 150) | def reset_noise(self):
method scale_noise (line 154) | def scale_noise(self, scale):
method step (line 160) | def step(self, obs, explore=False):
method get_params (line 183) | def get_params(self):
method load_params (line 191) | def load_params(self, params):
method init_hidden (line 199) | def init_hidden(self, len_ep, policy_hidden=False, policy_target_hidde...
class DDPGAgent_SNN (line 211) | class DDPGAgent_SNN(object):
method __init__ (line 216) | def __init__(self, num_in_pol, num_out_pol, num_in_critic, output_styl...
method reset_noise (line 246) | def reset_noise(self):
method scale_noise (line 250) | def scale_noise(self, scale):
method step (line 256) | def step(self, obs, explore=False):
method get_params (line 297) | def get_params(self):
method load_params (line 305) | def load_params(self, params):
class DDPGAgent_ToM (line 313) | class DDPGAgent_ToM(object):
method __init__ (line 318) | def __init__(self, num_in_pol, num_out_pol, num_in_critic, num_in_mle,...
method reset_noise (line 354) | def reset_noise(self):
method scale_noise (line 358) | def scale_noise(self, scale):
method step (line 364) | def step(self, obs, explore=False):
method get_params (line 399) | def get_params(self):
method load_params (line 412) | def load_params(self, params):
FILE: examples/Social_Cognition/MAToM-SNN/MPE/common/distributions.py
class Pd (line 10) | class Pd(object):
method flatparam (line 14) | def flatparam(self):
method mode (line 16) | def mode(self):
method logp (line 18) | def logp(self, x):
method kl (line 20) | def kl(self, other):
method entropy (line 22) | def entropy(self):
method sample (line 24) | def sample(self):
class PdType (line 27) | class PdType(object):
method pdclass (line 31) | def pdclass(self):
method pdfromflat (line 33) | def pdfromflat(self, flat):
method param_shape (line 35) | def param_shape(self):
method sample_shape (line 37) | def sample_shape(self):
method sample_dtype (line 39) | def sample_dtype(self):
method param_placeholder (line 42) | def param_placeholder(self, prepend_shape, name=None):
method sample_placeholder (line 44) | def sample_placeholder(self, prepend_shape, name=None):
class CategoricalPdType (line 47) | class CategoricalPdType(PdType):
method __init__ (line 48) | def __init__(self, ncat):
method pdclass (line 50) | def pdclass(self):
method param_shape (line 52) | def param_shape(self):
method sample_shape (line 54) | def sample_shape(self):
method sample_dtype (line 56) | def sample_dtype(self):
class SoftCategoricalPdType (line 59) | class SoftCategoricalPdType(PdType):
method __init__ (line 60) | def __init__(self, ncat):
method pdclass (line 62) | def pdclass(self):
method param_shape (line 64) | def param_shape(self):
method sample_shape (line 66) | def sample_shape(self):
method sample_dtype (line 68) | def sample_dtype(self):
class MultiCategoricalPdType (line 71) | class MultiCategoricalPdType(PdType):
method __init__ (line 72) | def __init__(self, low, high):
method pdclass (line 76) | def pdclass(self):
method pdfromflat (line 78) | def pdfromflat(self, flat):
method param_shape (line 80) | def param_shape(self):
method sample_shape (line 82) | def sample_shape(self):
method sample_dtype (line 84) | def sample_dtype(self):
class SoftMultiCategoricalPdType (line 87) | class SoftMultiCategoricalPdType(PdType):
method __init__ (line 88) | def __init__(self, low, high):
method pdclass (line 92) | def pdclass(self):
method pdfromflat (line 94) | def pdfromflat(self, flat):
method param_shape (line 96) | def param_shape(self):
method sample_shape (line 98) | def sample_shape(self):
method sample_dtype (line 100) | def sample_dtype(self):
class DiagGaussianPdType (line 103) | class DiagGaussianPdType(PdType):
method __init__ (line 104) | def __init__(self, size):
method pdclass (line 106) | def pdclass(self):
method param_shape (line 108) | def param_shape(self):
method sample_shape (line 110) | def sample_shape(self):
method sample_dtype (line 112) | def sample_dtype(self):
class BernoulliPdType (line 115) | class BernoulliPdType(PdType):
method __init__ (line 116) | def __init__(self, size):
method pdclass (line 118) | def pdclass(self):
method param_shape (line 120) | def param_shape(self):
method sample_shape (line 122) | def sample_shape(self):
method sample_dtype (line 124) | def sample_dtype(self):
class CategoricalPd (line 150) | class CategoricalPd(Pd):
method __init__ (line 151) | def __init__(self, logits):
method flatparam (line 153) | def flatparam(self):
method mode (line 155) | def mode(self):
method logp (line 157) | def logp(self, x):
method kl (line 159) | def kl(self, other):
method entropy (line 168) | def entropy(self):
method sample (line 174) | def sample(self):
method fromflat (line 178) | def fromflat(cls, flat):
class SoftCategoricalPd (line 181) | class SoftCategoricalPd(Pd):
method __init__ (line 182) | def __init__(self, logits):
method flatparam (line 184) | def flatparam(self):
method mode (line 186) | def mode(self):
method logp (line 188) | def logp(self, x):
method kl (line 190) | def kl(self, other):
method entropy (line 199) | def entropy(self):
method sample (line 205) | def sample(self):
method fromflat (line 209) | def fromflat(cls, flat):
class MultiCategoricalPd (line 212) | class MultiCategoricalPd(Pd):
method __init__ (line 213) | def __init__(self, low, high, flat):
method flatparam (line 217) | def flatparam(self):
method mode (line 219) | def mode(self):
method logp (line 221) | def logp(self, x):
method kl (line 223) | def kl(self, other):
method entropy (line 227) | def entropy(self):
method sample (line 229) | def sample(self):
method fromflat (line 232) | def fromflat(cls, flat):
class SoftMultiCategoricalPd (line 235) | class SoftMultiCategoricalPd(Pd): # doesn't work yet
method __init__ (line 236) | def __init__(self, low, high, flat):
method flatparam (line 240) | def flatparam(self):
method mode (line 242) | def mode(self):
method logp (line 247) | def logp(self, x):
method kl (line 249) | def kl(self, other):
method entropy (line 253) | def entropy(self):
method sample (line 255) | def sample(self):
method fromflat (line 261) | def fromflat(cls, flat):
class DiagGaussianPd (line 264) | class DiagGaussianPd(Pd):
method __init__ (line 265) | def __init__(self, flat):
method flatparam (line 271) | def flatparam(self):
method mode (line 273) | def mode(self):
method logp (line 275) | def logp(self, x):
method kl (line 279) | def kl(self, other):
method entropy (line 282) | def entropy(self):
method sample (line 284) | def sample(self):
method fromflat (line 287) | def fromflat(cls, flat):
class BernoulliPd (line 290) | class BernoulliPd(Pd):
method __init__ (line 291) | def __init__(self, logits):
method flatparam (line 294) | def flatparam(self):
method mode (line 296) | def mode(self):
method logp (line 298) | def logp(self, x):
method kl (line 300) | def kl(self, other):
method entropy (line 302) | def entropy(self):
method sample (line 304) | def sample(self):
method fromflat (line 309) | def fromflat(cls, flat):
function make_pdtype (line 312) | def make_pdtype(ac_space):
function shape_el (line 328) | def shape_el(v, i):
FILE: examples/Social_Cognition/MAToM-SNN/MPE/common/tile_images.py
function tile_images (line 3) | def tile_images(img_nhwc):
FILE: examples/Social_Cognition/MAToM-SNN/MPE/common/vec_env/vec_env.py
class AlreadySteppingError (line 7) | class AlreadySteppingError(Exception):
method __init__ (line 13) | def __init__(self):
class NotSteppingError (line 18) | class NotSteppingError(Exception):
method __init__ (line 24) | def __init__(self):
class VecEnv (line 29) | class VecEnv(ABC):
method __init__ (line 43) | def __init__(self, num_envs, observation_space, action_space):
method reset (line 49) | def reset(self):
method step_async (line 61) | def step_async(self, actions):
method step_wait (line 73) | def step_wait(self):
method close_extras (line 86) | def close_extras(self):
method close (line 93) | def close(self):
method step (line 101) | def step(self, actions):
method render (line 110) | def render(self, mode='human'):
method get_images (line 121) | def get_images(self):
method unwrapped (line 128) | def unwrapped(self):
method get_viewer (line 134) | def get_viewer(self):
class VecEnvWrapper (line 140) | class VecEnvWrapper(VecEnv):
method __init__ (line 146) | def __init__(self, venv, observation_space=None, action_space=None):
method step_async (line 152) | def step_async(self, actions):
method reset (line 156) | def reset(self):
method step_wait (line 160) | def step_wait(self):
method close (line 163) | def close(self):
method render (line 166) | def render(self, mode='human'):
method get_images (line 169) | def get_images(self):
method __getattr__ (line 172) | def __getattr__(self, name):
class VecEnvObservationWrapper (line 177) | class VecEnvObservationWrapper(VecEnvWrapper):
method process (line 179) | def process(self, obs):
method reset (line 182) | def reset(self):
method step_wait (line 186) | def step_wait(self):
class CloudpickleWrapper (line 190) | class CloudpickleWrapper(object):
method __init__ (line 195) | def __init__(self, x):
method __getstate__ (line 198) | def __getstate__(self):
method __setstate__ (line 202) | def __setstate__(self, ob):
function clear_mpi_env_vars (line 208) | def clear_mpi_env_vars():
FILE: examples/Social_Cognition/MAToM-SNN/MPE/main.py
function get_common_args (line 16) | def get_common_args():
function make_parallel_env (line 65) | def make_parallel_env(env_id, n_rollout_threads, seed, discrete_action):
function run (line 78) | def run(config):
FILE: examples/Social_Cognition/MAToM-SNN/MPE/multiagent/scenarios/__init__.py
function load (line 5) | def load(name):
FILE: examples/Social_Cognition/MAToM-SNN/MPE/multiagent/scenarios/simple.py
class Scenario (line 5) | class Scenario(BaseScenario):
method make_world (line 6) | def make_world(self):
method reset_world (line 24) | def reset_world(self, world):
method reward (line 41) | def reward(self, agent, world):
method observation (line 45) | def observation(self, agent, world):
FILE: examples/Social_Cognition/MAToM-SNN/MPE/multiagent/scenarios/simple_crypto.py
class CryptoAgent (line 14) | class CryptoAgent(Agent):
method __init__ (line 15) | def __init__(self):
class Scenario (line 19) | class Scenario(BaseScenario):
method make_world (line 21) | def make_world(self):
method reset_world (line 47) | def reset_world(self, world):
method benchmark_data (line 78) | def benchmark_data(self, agent, world):
method good_listeners (line 83) | def good_listeners(self, world):
method good_agents (line 87) | def good_agents(self, world):
method adversaries (line 91) | def adversaries(self, world):
method reward (line 94) | def reward(self, agent, world):
method agent_reward (line 97) | def agent_reward(self, agent, world):
method adversary_reward (line 116) | def adversary_reward(self, agent, world):
method observation (line 124) | def observation(self, agent, world):
FILE: examples/Social_Cognition/MAToM-SNN/MPE/multiagent/scenarios/simple_push.py
class Scenario (line 5) | class Scenario(BaseScenario):
method make_world (line 6) | def make_world(self):
method reset_world (line 33) | def reset_world(self, world):
method reward (line 58) | def reward(self, agent, world):
method agent_reward (line 62) | def agent_reward(self, agent, world):
method adversary_reward (line 66) | def adversary_reward(self, agent, world):
method observation (line 76) | def observation(self, agent, world):
FILE: examples/Social_Cognition/MAToM-SNN/MPE/multiagent/scenarios/simple_reference.py
class Scenario (line 5) | class Scenario(BaseScenario):
method make_world (line 6) | def make_world(self):
method reset_world (line 26) | def reset_world(self, world):
method reward (line 55) | def reward(self, agent, world):
method observation (line 61) | def observation(self, agent, world):
FILE: examples/Social_Cognition/MAToM-SNN/MPE/multiagent/scenarios/simple_speaker_listener.py
class Scenario (line 5) | class Scenario(BaseScenario):
method make_world (line 6) | def make_world(self):
method reset_world (line 33) | def reset_world(self, world):
method benchmark_data (line 59) | def benchmark_data(self, agent, world):
method reward (line 63) | def reward(self, agent, world):
method observation (line 69) | def observation(self, agent, world):
FILE: examples/Social_Cognition/MAToM-SNN/MPE/multiagent/scenarios/simple_spread.py
class Scenario (line 6) | class Scenario(BaseScenario):
method make_world (line 7) | def make_world(self):
method reset_world (line 31) | def reset_world(self, world):
method benchmark_data (line 47) | def benchmark_data(self, agent, world):
method is_collision (line 66) | def is_collision(self, agent1, agent2):
method reward (line 72) | def reward(self, agent, world):
method observation (line 84) | def observation(self, agent, world):
FILE: examples/Social_Cognition/MAToM-SNN/MPE/multiagent/scenarios/simple_world_comm.py
class Scenario (line 6) | class Scenario(BaseScenario):
method make_world (line 7) | def make_world(self):
method set_boundaries (line 59) | def set_boundaries(self, world):
method reset_world (line 88) | def reset_world(self, world):
method benchmark_data (line 115) | def benchmark_data(self, agent, world):
method is_collision (line 126) | def is_collision(self, agent1, agent2):
method good_agents (line 134) | def good_agents(self, world):
method adversaries (line 138) | def adversaries(self, world):
method reward (line 142) | def reward(self, agent, world):
method outside_boundary (line 148) | def outside_boundary(self, agent):
method agent_reward (line 155) | def agent_reward(self, agent, world):
method adversary_reward (line 185) | def adversary_reward(self, agent, world):
method observation2 (line 201) | def observation2(self, agent, world):
method observation (line 224) | def observation(self, agent, world):
FILE: examples/Social_Cognition/MAToM-SNN/MPE/policy/maddpg.py
class MADDPG (line 15) | class MADDPG(object):
method __init__ (line 19) | def __init__(self, agent_init_params, alg_types, device,
method policies (line 56) | def policies(self):
method target_policies (line 60) | def target_policies(self):
method scale_noise (line 63) | def scale_noise(self, scale):
method reset_noise (line 72) | def reset_noise(self):
method step (line 76) | def step(self, observations, explore=False):
method update (line 88) | def update(self, sample, agent_i, parallel=False, logger=None):
method update_all_targets (line 180) | def update_all_targets(self):
method prep_training (line 190) | def prep_training(self, device='gpu'):
method prep_rollouts (line 217) | def prep_rollouts(self, device='cpu'):
method save (line 230) | def save(self, filename):
method init_from_env (line 240) | def init_from_env(cls, env, device, agent_alg="MADDPG", adversary_alg=...
method init_from_save (line 292) | def init_from_save(cls, filename):
class MADDPG_SNN (line 303) | class MADDPG_SNN(object):
method __init__ (line 307) | def __init__(self, agent_init_params, alg_types,output_style, device,
method policies (line 344) | def policies(self):
method target_policies (line 348) | def target_policies(self):
method scale_noise (line 351) | def scale_noise(self, scale):
method reset_noise (line 360) | def reset_noise(self):
method step (line 364) | def step(self, observations, explore=False):
method update (line 376) | def update(self, sample, agent_i, parallel=False, logger=None):
method update_all_targets (line 483) | def update_all_targets(self):
method prep_training (line 493) | def prep_training(self, device='gpu'):
method prep_rollouts (line 520) | def prep_rollouts(self, device='cpu'):
method save (line 533) | def save(self, filename):
method init_from_env (line 543) | def init_from_env(cls, env, device, agent_alg="MADDPG_SNN", adversary_...
method init_from_save (line 598) | def init_from_save(cls, filename):
class MADDPG_ToM (line 609) | class MADDPG_ToM(object):
method __init__ (line 614) | def __init__(self, agent_init_params, alg_types, output_style, device,
method policies (line 699) | def policies(self):
method target_policies (line 703) | def target_policies(self):
method scale_noise (line 706) | def scale_noise(self, scale):
method reset_noise (line 715) | def reset_noise(self):
method step (line 719) | def step(self, observations, explore=False): #simple_tag
method _get_obs (line 805) | def _get_obs(self, observations):
method trian_tag (line 853) | def trian_tag(self, agent_i, KL_criterion, obs, parallel, acs):
method trian_adv (line 885) | def trian_adv(self, agent_i, KL_criterion, obs, parallel, acs):
method trian_push (line 917) | def trian_push(self, agent_i, KL_criterion, obs, parallel, acs):
method trian_com (line 939) | def trian_com(self, agent_i, KL_criterion, obs, parallel, acs):
method update (line 964) | def update(self, sample, agent_i, parallel=False, logger=None, sample_...
method update_all_targets (line 1067) | def update_all_targets(self):
method prep_training (line 1077) | def prep_training(self, device='gpu'):
method prep_rollouts (line 1115) | def prep_rollouts(self, device='cpu'):
method save (line 1128) | def save(self, filename):
method init_from_env (line 1139) | def init_from_env(cls, env, device, agent_alg="MADDPG_ToM", adversary_...
met
Condensed preview — 685 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (4,962K chars).
[
{
"path": ".gitignore",
"chars": 342,
"preview": ".idea\n*.egg-info/\neggs/\n.eggs/\n*.exe\n*.pyc\n/.vscode/\n*.code-workspace\n__pycache__\n# Sphinx documentation\ndocs/_build/\ndo"
},
{
"path": "LICENSE",
"chars": 11358,
"preview": "\n Apache License\n Version 2.0, January 2004\n "
},
{
"path": "README.md",
"chars": 9193,
"preview": "# BrainCog\n\n---\n\nBrainCog is an open source spiking neural network based brain-inspired \ncognitive intelligence engine f"
},
{
"path": "braincog/__init__.py",
"chars": 136,
"preview": "# __all__ = ['base', 'datasets', 'model_zoo', 'utils']\n#\n# from . import (\n# base,\n# datasets,\n# model_zoo,\n"
},
{
"path": "braincog/base/__init__.py",
"chars": 227,
"preview": "__all__ = ['node', 'connection', 'learningrule', 'brainarea', 'encoder', 'utils', 'conversion']\n\nfrom . import (\n nod"
},
{
"path": "braincog/base/brainarea/BrainArea.py",
"chars": 3884,
"preview": "import numpy as np\r\nimport torch, os, sys\r\nfrom torch import nn\r\nfrom torch.nn import Parameter\r\n\r\nimport abc\r\nimport ma"
},
{
"path": "braincog/base/brainarea/IPL.py",
"chars": 2848,
"preview": "\r\nfrom braincog.base.learningrule.STDP import *\r\nfrom braincog.base.node.node import *\r\nfrom braincog.base.connection.Cu"
},
{
"path": "braincog/base/brainarea/Insula.py",
"chars": 2284,
"preview": "import numpy as np\r\nimport torch,os,sys\r\nfrom torch import nn\r\nfrom torch.nn import Parameter \r\n\r\nimport abc\r\nimport mat"
},
{
"path": "braincog/base/brainarea/PFC.py",
"chars": 1329,
"preview": "import torch\nfrom torch import nn\nfrom braincog.base.brainarea import BrainArea\nfrom braincog.model_zoo.base_module impo"
},
{
"path": "braincog/base/brainarea/__init__.py",
"chars": 389,
"preview": "from .basalganglia import basalganglia\nfrom .BrainArea import BrainArea, ThreePointForward, Feedback, TwoInOneOut, SelfC"
},
{
"path": "braincog/base/brainarea/basalganglia.py",
"chars": 6113,
"preview": "import numpy as np\r\nimport torch\r\nimport os\r\nimport sys\r\nfrom torch import nn\r\nfrom torch.nn import Parameter\r\n\r\nimport "
},
{
"path": "braincog/base/brainarea/dACC.py",
"chars": 4502,
"preview": "import torch\nimport matplotlib.pyplot as plt\nimport numpy as np\nnp.set_printoptions(threshold=np.inf)\nfrom utils.one_hot"
},
{
"path": "braincog/base/connection/CustomLinear.py",
"chars": 785,
"preview": "import os\r\nimport sys\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torch import nn\r\nfrom torch import einsum\r\nimport torch."
},
{
"path": "braincog/base/connection/__init__.py",
"chars": 295,
"preview": "from .CustomLinear import CustomLinear\nfrom .layer import VotingLayer, WTALayer, NDropout, ThresholdDependentBatchNorm2d"
},
{
"path": "braincog/base/connection/layer.py",
"chars": 8303,
"preview": "import warnings\nimport math\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch import einsum\nfrom torch.nn."
},
{
"path": "braincog/base/conversion/__init__.py",
"chars": 212,
"preview": "from .convertor import HookScale, Hookoutput, Scale, Convertor, SNode\nfrom .merge import mergeConvBN, merge\n\n\n__all__ = "
},
{
"path": "braincog/base/conversion/convertor.py",
"chars": 7110,
"preview": "import torch\nimport torch.nn as nn\nfrom braincog.base.connection.layer import SMaxPool, LIPool\nfrom .merge import mergeC"
},
{
"path": "braincog/base/conversion/merge.py",
"chars": 1138,
"preview": "import torch\nimport torch.nn as nn\n\n\ndef mergeConvBN(m):\n \"\"\"\n 合并网络模块中的卷积与BN层\n \"\"\"\n children = list(m.named_"
},
{
"path": "braincog/base/conversion/spicalib.py",
"chars": 1035,
"preview": "import torch\nimport torch.nn as nn\n\n\nclass SpiCalib(nn.Module):\n def __init__(self, allowance):\n super(SpiCali"
},
{
"path": "braincog/base/encoder/__init__.py",
"chars": 163,
"preview": "from .encoder import Encoder\nfrom .population_coding import PEncoder\nfrom.qs_coding import QSEncoder\n\n\n__all__ = [\n '"
},
{
"path": "braincog/base/encoder/encoder.py",
"chars": 6114,
"preview": "import torch\nimport torch.nn as nn\nfrom einops import rearrange, repeat\nfrom braincog.base.strategy.surrogate import Gat"
},
{
"path": "braincog/base/encoder/population_coding.py",
"chars": 4285,
"preview": "import torch\nimport torch.nn as nn\nimport torchvision.utils\n\nclass PEncoder(nn.Module):\n \"\"\"\n Population coding\n "
},
{
"path": "braincog/base/encoder/qs_coding.py",
"chars": 5453,
"preview": "from signal import signal\nfrom subprocess import call\nimport numpy as np\nimport random\nimport copy\n\n\nclass QSEncoder:\n "
},
{
"path": "braincog/base/learningrule/BCM.py",
"chars": 1764,
"preview": "import numpy as np\nimport torch\nimport os\nimport sys\nfrom torch import nn\nfrom torch.nn import Parameter\n\nimport abc\nimp"
},
{
"path": "braincog/base/learningrule/Hebb.py",
"chars": 1693,
"preview": "import numpy as np\nimport torch\nimport os\nimport sys\nfrom torch import nn\nfrom torch.nn import Parameter\n\nimport abc\nimp"
},
{
"path": "braincog/base/learningrule/RSTDP.py",
"chars": 1531,
"preview": "import numpy as np\nimport torch\nimport os\nimport sys\nfrom torch import nn\nfrom torch.nn import Parameter\n\nimport abc\nimp"
},
{
"path": "braincog/base/learningrule/STDP.py",
"chars": 7263,
"preview": "import numpy as np\r\nimport torch\r\nimport os\r\nimport sys\r\nfrom torch import nn\r\nfrom torch.nn import Parameter\r\n\r\nimport "
},
{
"path": "braincog/base/learningrule/STP.py",
"chars": 6286,
"preview": "import math\n\n\nclass short_time():\n \"\"\"\n 计算短期突触可塑性的变量详见Tsodyks和Markram 1997\n :param Syn:突出可塑性结构体\n "
},
{
"path": "braincog/base/learningrule/__init__.py",
"chars": 281,
"preview": "from .BCM import BCM\nfrom .Hebb import Hebb\nfrom .RSTDP import RSTDP\nfrom .STDP import STDP, MutliInputSTDP, LTP, LTD, F"
},
{
"path": "braincog/base/node/__init__.py",
"chars": 19,
"preview": "from .node import *"
},
{
"path": "braincog/base/node/node.py",
"chars": 54377,
"preview": "# encoding: utf-8\n# Author : Floyed<Floyed_Shen@outlook.com>\n# Datetime : 2022/4/10 18:46\n# User : Floyed\n# Pro"
},
{
"path": "braincog/base/strategy/LateralInhibition.py",
"chars": 890,
"preview": "import warnings\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass LateralInhibition(nn.Module):\n"
},
{
"path": "braincog/base/strategy/__init__.py",
"chars": 101,
"preview": "__all__ = ['surrogate', 'LateralInhibition']\n\nfrom . import (\n surrogate,\n LateralInhibition\n)\n"
},
{
"path": "braincog/base/strategy/surrogate.py",
"chars": 8746,
"preview": "import math\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\ndef heaviside(x):\n return (x >="
},
{
"path": "braincog/base/utils/__init__.py",
"chars": 539,
"preview": "from .criterions import UnilateralMse, MixLoss\nfrom .visualization import plot_tsne, plot_tsne_3d, plot_confusion_matrix"
},
{
"path": "braincog/base/utils/criterions.py",
"chars": 1698,
"preview": "import numpy as np\nimport torch\nimport torch.nn.functional as F\n\nclass UnilateralMse(torch.nn.Module):\n \"\"\"\n 扩展单边的"
},
{
"path": "braincog/base/utils/visualization.py",
"chars": 6750,
"preview": "# encoding: utf-8\n# Author : Floyed<Floyed_Shen@outlook.com>\n# Datetime : 2022/7/1 11:10\n# User : Floyed\n# Prod"
},
{
"path": "braincog/datasets/CUB2002011.py",
"chars": 4323,
"preview": "import os\n\nimport pandas as pd\nfrom torchvision.datasets import VisionDataset\nfrom torchvision.datasets.folder import de"
},
{
"path": "braincog/datasets/ESimagenet/ES_imagenet.py",
"chars": 2572,
"preview": "# -*- coding: utf-8 -*- \n# Time : 2022/11/1 11:06\n# Author : Regulus\n# FileName: ES_imagenet.py\n# Explain: \n#"
},
{
"path": "braincog/datasets/ESimagenet/__init__.py",
"chars": 322,
"preview": "# -*- coding: utf-8 -*- \n# Time : 2022/11/1 11:05\n# Author : Regulus\n# FileName: __init__.py.py\n# Explain: \n#"
},
{
"path": "braincog/datasets/ESimagenet/reconstructed_ES_imagenet.py",
"chars": 3197,
"preview": "# -*- coding: utf-8 -*- \n# Time : 2022/11/1 11:06\n# Author : Regulus\n# FileName: reconstructed_ES_imagenet.py"
},
{
"path": "braincog/datasets/NOmniglot/NOmniglot.py",
"chars": 2430,
"preview": "from torch.utils.data import Dataset\nfrom braincog.datasets.NOmniglot.utils import *\n\n\nclass NOmniglot(Dataset):\n def"
},
{
"path": "braincog/datasets/NOmniglot/__init__.py",
"chars": 196,
"preview": "__all__ = ['NOmniglot', 'nomniglot_full', 'nomniglot_nw_ks','nomniglot_pair','utils']\r\nfrom . import (\r\n NOmniglot,\r\n"
},
{
"path": "braincog/datasets/NOmniglot/nomniglot_full.py",
"chars": 1801,
"preview": "import torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom braincog.datasets.NOmniglot.NOmniglot import NOmniglo"
},
{
"path": "braincog/datasets/NOmniglot/nomniglot_nw_ks.py",
"chars": 3497,
"preview": "import torch\nimport torchvision\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom braincog.datase"
},
{
"path": "braincog/datasets/NOmniglot/nomniglot_pair.py",
"chars": 7060,
"preview": "import torch\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nfrom numpy.random import choice as npc\n"
},
{
"path": "braincog/datasets/NOmniglot/utils.py",
"chars": 9315,
"preview": "import torch\nimport threading\nimport numpy as np\nimport pandas\nimport os\nfrom dv import AedatFile\n\n\nclass FunctionThread"
},
{
"path": "braincog/datasets/StanfordDogs.py",
"chars": 4495,
"preview": "import os\nimport scipy.io\nfrom os.path import join\nfrom torchvision.datasets import VisionDataset\nfrom torchvision.datas"
},
{
"path": "braincog/datasets/TinyImageNet.py",
"chars": 4622,
"preview": "import os\nimport os\nimport pandas as pd\nimport warnings\nfrom torchvision.datasets import ImageFolder\nfrom torchvision.da"
},
{
"path": "braincog/datasets/__init__.py",
"chars": 1050,
"preview": "from .datasets import build_transform, build_dataset, get_mnist_data, get_fashion_data, \\\n get_cifar10_data, get_cifa"
},
{
"path": "braincog/datasets/bullying10k/__init__.py",
"chars": 36,
"preview": "from .bullying10k import BULLYINGDVS"
},
{
"path": "braincog/datasets/bullying10k/bullying10k.py",
"chars": 2454,
"preview": "import os\nimport numpy as np\nfrom numpy.lib import recfunctions\nimport scipy.io as scio\nfrom typing import Tuple, Any, O"
},
{
"path": "braincog/datasets/cut_mix.py",
"chars": 16337,
"preview": "import math\nimport numpy as np\nimport random\nfrom torch.utils.data.dataset import Dataset\nfrom braincog.datasets.rand_au"
},
{
"path": "braincog/datasets/datasets.py",
"chars": 71215,
"preview": "import os, warnings\n\nimport tonic\nfrom tonic import DiskCachedDataset\n \nimport torch\nimport torch.nn.functional as F\nimp"
},
{
"path": "braincog/datasets/gen_input_signal.py",
"chars": 147,
"preview": "import numpy as np\nimport random\nimport copy\n\ndt = 1.0 # ms\nlambda_max = 0.25 * dt # maximum spike rat"
},
{
"path": "braincog/datasets/hmdb_dvs/__init__.py",
"chars": 255,
"preview": "# encoding: utf-8\n# Author : Floyed<Floyed_Shen@outlook.com>\n# Datetime : 2023/1/30 20:54\n# User : yu\n# Product"
},
{
"path": "braincog/datasets/hmdb_dvs/hmdb_dvs.py",
"chars": 3507,
"preview": "# encoding: utf-8\n# Author : Floyed<Floyed_Shen@outlook.com>\n# Datetime : 2023/1/30 20:54\n# User : yu\n# Product"
},
{
"path": "braincog/datasets/ncaltech101/__init__.py",
"chars": 269,
"preview": "# encoding: utf-8\n# Author : Floyed<Floyed_Shen@outlook.com>\n# Datetime : 2023/1/30 21:26\n# User : yu\n# Product"
},
{
"path": "braincog/datasets/ncaltech101/ncaltech101.py",
"chars": 6833,
"preview": "# encoding: utf-8\n# Author : Floyed<Floyed_Shen@outlook.com>\n# Datetime : 2023/1/30 21:28\n# User : yu\n# Product"
},
{
"path": "braincog/datasets/rand_aug.py",
"chars": 5256,
"preview": "import random\nimport numpy as np\nimport torch\nfrom torchvision import transforms\nfrom torchvision.transforms import func"
},
{
"path": "braincog/datasets/scripts/testlist01.txt",
"chars": 141952,
"preview": "ApplyEyeMakeup/v_ApplyEyeMakeup_g01_c01.avi\nApplyEyeMakeup/v_ApplyEyeMakeup_g01_c02.avi\nApplyEyeMakeup/v_ApplyEyeMakeup_"
},
{
"path": "braincog/datasets/scripts/ucf101_dvs_preprocessing.py",
"chars": 948,
"preview": "# encoding: utf-8\n# Author : Floyed<Floyed_Shen@outlook.com>\n# Datetime : 2022/12/20 20:16\n# User : Floyed\n# Pr"
},
{
"path": "braincog/datasets/ucf101_dvs/__init__.py",
"chars": 264,
"preview": "# encoding: utf-8\n# Author : Floyed<Floyed_Shen@outlook.com>\n# Datetime : 2023/1/30 21:04\n# User : yu\n# Product"
},
{
"path": "braincog/datasets/ucf101_dvs/ucf101_dvs.py",
"chars": 3899,
"preview": "# encoding: utf-8\n# Author : Floyed<Floyed_Shen@outlook.com>\n# Datetime : 2023/1/30 21:05\n# User : yu\n# Product"
},
{
"path": "braincog/datasets/utils.py",
"chars": 545,
"preview": "import torch\nfrom einops import repeat\nfrom braincog.datasets.gen_input_signal import lambda_max\n\n\ndef rescale(x, factor"
},
{
"path": "braincog/model_zoo/NeuEvo/__init__.py",
"chars": 5888,
"preview": "# encoding: utf-8\n# Author : Floyed<Floyed_Shen@outlook.com>\n# Datetime : 2022/9/1 16:43\n# User : Floyed\n# Prod"
},
{
"path": "braincog/model_zoo/NeuEvo/architect.py",
"chars": 3371,
"preview": "import torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport numpy as np\nfrom numpy.linalg im"
},
{
"path": "braincog/model_zoo/NeuEvo/genotypes.py",
"chars": 14796,
"preview": "from collections import namedtuple\n\nimport torch\n\nGenotype = namedtuple('Genotype', 'normal normal_concat')\n\n\"\"\"\nOperati"
},
{
"path": "braincog/model_zoo/NeuEvo/model.py",
"chars": 20348,
"preview": "from functools import partial\nfrom typing import List, Type\n\nfrom braincog.model_zoo.NeuEvo.operations import *\nfrom bra"
},
{
"path": "braincog/model_zoo/NeuEvo/model_search.py",
"chars": 12025,
"preview": "from functools import partial\nfrom braincog.model_zoo.NeuEvo.operations import *\nfrom torch.autograd import Variable\nfro"
},
{
"path": "braincog/model_zoo/NeuEvo/operations.py",
"chars": 16453,
"preview": "import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import *\nimport torch.nn.functional as F\nfrom torch "
},
{
"path": "braincog/model_zoo/NeuEvo/others.py",
"chars": 5598,
"preview": "# encoding: utf-8\n# Author : Floyed<Floyed_Shen@outlook.com>\n# Datetime : 2023/5/22 13:32\n# User : yu\n# Product"
},
{
"path": "braincog/model_zoo/__init__.py",
"chars": 179,
"preview": "__all__ = ['convnet', 'resnet', 'base_module', 'glsnn', 'qsnn', 'resnet19_snn']\n\nfrom . import (\n convnet,\n resnet"
},
{
"path": "braincog/model_zoo/backeinet.py",
"chars": 3641,
"preview": "import numpy as np\nfrom timm.models import register_model\nfrom braincog.model_zoo.base_module import BaseModule, BaseCon"
},
{
"path": "braincog/model_zoo/base_module.py",
"chars": 10770,
"preview": "from functools import partial\nfrom torchvision.ops import DeformConv2d\nfrom braincog.base.node.node import *\nfrom brainc"
},
{
"path": "braincog/model_zoo/bdmsnn.py",
"chars": 6573,
"preview": "\r\nimport torch\r\nfrom torch import nn\r\n\r\nfrom braincog.base.node.node import IFNode, SimHHNode\r\nfrom braincog.base.learni"
},
{
"path": "braincog/model_zoo/convnet.py",
"chars": 11030,
"preview": "import abc\nfrom functools import partial\nfrom torch.nn import functional as F\nimport torchvision\nfrom timm.models import"
},
{
"path": "braincog/model_zoo/fc_snn.py",
"chars": 7570,
"preview": "from functools import partial\nfrom torch.nn import functional as F\nimport torchvision\nfrom timm.models import register_m"
},
{
"path": "braincog/model_zoo/glsnn.py",
"chars": 4042,
"preview": "import abc\r\nfrom functools import partial\r\nfrom timm.models import register_model\r\nfrom braincog.base.node.node import *"
},
{
"path": "braincog/model_zoo/linearNet.py",
"chars": 2346,
"preview": "import torch.nn.functional as F\r\n\r\nfrom braincog.base.strategy.surrogate import *\r\nfrom braincog.base.node.node import I"
},
{
"path": "braincog/model_zoo/nonlinearNet.py",
"chars": 3021,
"preview": "import torch.nn.functional as F\r\n\r\nfrom braincog.base.strategy.surrogate import *\r\nfrom braincog.base.node.node import I"
},
{
"path": "braincog/model_zoo/qsnn.py",
"chars": 9304,
"preview": "import numpy as np\nfrom scipy.linalg import orth\nfrom scipy.special import expit\nfrom scipy.signal import fftconvolve\nim"
},
{
"path": "braincog/model_zoo/resnet.py",
"chars": 17759,
"preview": "'''\nDeep Residual Learning for Image Recognition\nhttps://github.com/pytorch/vision/blob/master/torchvision/models/resnet"
},
{
"path": "braincog/model_zoo/resnet19_snn.py",
"chars": 8443,
"preview": "# encoding: utf-8\n# Author : Floyed<Floyed_Shen@outlook.com>\n# Datetime : 2022/7/26 19:33\n# User : Floyed\n# Pro"
},
{
"path": "braincog/model_zoo/rsnn.py",
"chars": 2399,
"preview": "\r\nimport torch\r\nfrom torch import nn\r\n\r\nfrom braincog.base.node.node import IFNode\r\nfrom braincog.base.learningrule.STDP"
},
{
"path": "braincog/model_zoo/sew_resnet.py",
"chars": 40596,
"preview": "import torch\nimport torch.nn as nn\nfrom copy import deepcopy\n\ntry:\n from torchvision.models.utils import load_state_d"
},
{
"path": "braincog/model_zoo/vgg_snn.py",
"chars": 7077,
"preview": "# encoding: utf-8\n# Author : Floyed<Floyed_Shen@outlook.com>\n# Datetime : 2022/7/26 18:56\n# User : Floyed\n# Pro"
},
{
"path": "braincog/utils.py",
"chars": 5185,
"preview": "import os\r\nimport random\r\nimport math\r\nimport csv\r\nimport numpy as np\r\nimport torch\r\nfrom torch import nn\r\nfrom PIL impo"
},
{
"path": "docs/Makefile",
"chars": 638,
"preview": "# Minimal makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line, and also\n# from the "
},
{
"path": "docs/make.bat",
"chars": 769,
"preview": "@ECHO OFF\n\npushd %~dp0\n\nREM Command file for Sphinx documentation\n\nif \"%SPHINXBUILD%\" == \"\" (\n\tset SPHINXBUILD=sphinx-bu"
},
{
"path": "docs/source/conf.py",
"chars": 2545,
"preview": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common op"
},
{
"path": "docs/source/examples/Brain_Cognitive_Function_Simulation/drosophila.md",
"chars": 596,
"preview": "\n# Drosophila-inspired decision-making SNN\n\n## Run\n \"drosophila.py\" includes the training phase and testing phase.\n\n```"
},
{
"path": "docs/source/examples/Brain_Cognitive_Function_Simulation/index.rst",
"chars": 121,
"preview": "Brain_Cognitive_Function_Simulation\n======================================\n\n.. toctree::\n :maxdepth: 2\n\n drosophil"
},
{
"path": "docs/source/examples/Decision_Making/BDM_SNN.md",
"chars": 633,
"preview": "\n# Brain-inspired Decision-Making Spiking Neural Network\n\n## Run\n \"BDM-SNN.py\" includes the multi-brain regions coordin"
},
{
"path": "docs/source/examples/Decision_Making/RL.md",
"chars": 410,
"preview": "# PL-SDQN\n\nThis repository contains code from our paper [**Solving the Spike Feature Information Vanishing Problem in Sp"
},
{
"path": "docs/source/examples/Decision_Making/index.rst",
"chars": 105,
"preview": "Decision Making\n======================================\n\n.. toctree::\n :maxdepth: 2\n\n RL\n BDM_SNN"
},
{
"path": "docs/source/examples/Knowledge_Representation_and_Reasoning/CKRGSNN.md",
"chars": 809,
"preview": "# Commonsense Knowledge Representation SNN\n\n(https://arxiv.org/abs/2207.05561)\n\nThis repository contains code from our p"
},
{
"path": "docs/source/examples/Knowledge_Representation_and_Reasoning/CRSNN.md",
"chars": 802,
"preview": "# Causal Reasoning SNN\n(https://10.1109/IJCNN52387.2021.9534102)\n\nThis repository contains code from our paper [**A Brai"
},
{
"path": "docs/source/examples/Knowledge_Representation_and_Reasoning/SPSNN.md",
"chars": 889,
"preview": "# Sequence Production SNN\n[]\n\nThis repository contains code from our p"
},
{
"path": "docs/source/examples/Knowledge_Representation_and_Reasoning/index.rst",
"chars": 160,
"preview": "Knowledge Representation and Reasoning\n=========================================\n\n.. toctree::\n :maxdepth: 2\n\n mus"
},
{
"path": "docs/source/examples/Knowledge_Representation_and_Reasoning/musicMemory.md",
"chars": 71,
"preview": "# Music Memory\n\n数据集:http://www.piano-midi.de/\n\n自行下载数据集,数据使用方法见task下的示例。"
},
{
"path": "docs/source/examples/Multi-scale_Brain_Structure_Simulation/Corticothalamic_minicolumn.md",
"chars": 364,
"preview": "# Corticothalamic minicolumn\n\n## Description\nThe anatomical data is saved in the \"tool\" package. The **main.py** create "
},
{
"path": "docs/source/examples/Multi-scale_Brain_Structure_Simulation/HumanBrain.md",
"chars": 787,
"preview": "# Human Brain Simulation\n\n## Description\nHuman Brain Simulation is a large scale brain modeling framework depending on b"
},
{
"path": "docs/source/examples/Multi-scale_Brain_Structure_Simulation/Human_PFC.md",
"chars": 319,
"preview": "# Human PFC\n\n## Input:\n\n* 程序输入六层皮质柱的电生理数据,数据文件名中的数字表示神经元数量。程序默认有背景电流输入。其中data+数字命名的是随机输入刺激的文件,输入图片刺激的文件分别有人类参数的和小鼠参数的。分别"
},
{
"path": "docs/source/examples/Multi-scale_Brain_Structure_Simulation/MacaqueBrain.md",
"chars": 792,
"preview": "# Macaque Brain Simulation\n\n## Description\nMacaque Brain Simulation is a large scale brain modeling framework depending "
},
{
"path": "docs/source/examples/Multi-scale_Brain_Structure_Simulation/index.rst",
"chars": 205,
"preview": "Multi-scale Brain Structure Simulation\n=========================================\n\n.. toctree::\n :maxdepth: 2\n\n Mac"
},
{
"path": "docs/source/examples/Multi-scale_Brain_Structure_Simulation/mouse_brain.md",
"chars": 354,
"preview": "# Mouse Brain\n\n## Input:\n\n* 程序输入213个脑区之间的连接权重的表格。放在谷歌网盘上面,名称是'W_213.xlsx'。\n\n链接:[https://drive.google.com/drive/folders/1"
},
{
"path": "docs/source/examples/Perception_and_Learning/Conversion.md",
"chars": 411,
"preview": "# Conversion Method\nTraining deep spiking neural network with ann-snn conversion\nreplace ReLU and MaxPooling in pytorch "
},
{
"path": "docs/source/examples/Perception_and_Learning/MultisensoryIntegration.md",
"chars": 2112,
"preview": "# Multisensory Integration DEMO\n\nIn `MultisensoryIntegrationDEMO_AM.py` and `MultisensoryIntegrationDEMO_AM.py`, we impl"
},
{
"path": "docs/source/examples/Perception_and_Learning/QSNN.md",
"chars": 559,
"preview": "# Quantum superposition inspired spiking neural network\n\nThis repository contains code from our paper [**Quantum superpo"
},
{
"path": "docs/source/examples/Perception_and_Learning/UnsupervisedSTDP.md",
"chars": 332,
"preview": "# Unsupervised STDP\nThis is an example of training Unsupervised STDP-based spiking neural network. We used a STB-STDP al"
},
{
"path": "docs/source/examples/Perception_and_Learning/img_cls/bp.md",
"chars": 16630,
"preview": "# Script for training high-performance SNNs based on back propagation \nThis is an example of training high-performance S"
},
{
"path": "docs/source/examples/Perception_and_Learning/img_cls/glsnn.md",
"chars": 386,
"preview": "# SNN with global feedback connections\nTraining deep spiking neural network with the global \nfeedback connections and th"
},
{
"path": "docs/source/examples/Perception_and_Learning/img_cls/index.rst",
"chars": 116,
"preview": "Examples for Image Classification\n=================================\n\n.. toctree::\n :maxdepth: 2\n\n bp\n glsnn"
},
{
"path": "docs/source/examples/Perception_and_Learning/index.rst",
"chars": 180,
"preview": "Perception and Learning\n=================================\n\n.. toctree::\n :maxdepth: 2\n\n img_cls/index\n Conversi"
},
{
"path": "docs/source/examples/Social_Cognition/Mirror_Test.md",
"chars": 1607,
"preview": "# Mirror Test\n\nThe mirror_test.py implements the core code of the Multi-Robots Mirror Self-Recognition Test in \"Toward R"
},
{
"path": "docs/source/examples/Social_Cognition/ToM.md",
"chars": 563,
"preview": "# ToM\n\n\n## Requirments\n\n* numpy\n* scipy\n* pytorch >= 1.7.0\n* torchvision\n* pygame\n\n## Run\n### Train \n* the file to be ru"
},
{
"path": "docs/source/examples/Social_Cognition/index.rst",
"chars": 112,
"preview": "Social Cognition\n======================================\n\n.. toctree::\n :maxdepth: 2\n\n ToM\n Mirror_Test\n"
},
{
"path": "docs/source/examples/index.rst",
"chars": 305,
"preview": "Examples\n=================================\n\n.. toctree::\n :maxdepth: 2\n\n Perception_and_Learning/index\n Brain_C"
},
{
"path": "docs/source/index.rst",
"chars": 476,
"preview": ".. braincog documentation master file, created by\n sphinx-quickstart on Sun Apr 10 21:02:06 2022.\n You can adapt thi"
},
{
"path": "docs/source/modules.rst",
"chars": 61,
"preview": "braincog\n========\n\n.. toctree::\n :maxdepth: 4\n\n braincog\n"
},
{
"path": "docs/source/setup.rst",
"chars": 103,
"preview": "setup module\n============\n\n.. automodule:: setup\n :members:\n :undoc-members:\n :show-inheritance:\n"
},
{
"path": "docs.md",
"chars": 943,
"preview": "# Sphinx 文档教程 \n\n## 安装 \n\n131 braincog 环境已经装好了\n```shell\n pip install sphinx sphinx-rtd-theme recommonmark\n``` \n\n## 配置 "
},
{
"path": "documents/Data_engine.md",
"chars": 1901,
"preview": "# BrainCog Data Engine\n\nIn addition to the static datasets, BrainCog supports the commonly used neuromorphic\ndatasets, s"
},
{
"path": "documents/Lectures.md",
"chars": 11786,
"preview": "# Lectures\n\n- [BrainCog Talk] Beginning BrainCog Lecture 32. Structural Modeling and Neural Activity Simulation of Mamma"
},
{
"path": "documents/Pub_brain_inspired_AI.md",
"chars": 29829,
"preview": "# Publications Using BrainCog \n## Brain Inspired AI\n\n\n### Perception and Leanring\n| Papers "
},
{
"path": "documents/Pub_brain_simulation.md",
"chars": 4955,
"preview": "# Publications Using BrainCog \n## Brain Simulation\n\n### Funtion\n\n| Papers "
},
{
"path": "documents/Pub_sh_codesign.md",
"chars": 1802,
"preview": "# Publications Using BrainCog \n## Software-Hardware Co-design\n\n\n### Hardware Acceleration\n| Papers "
},
{
"path": "documents/Publication.md",
"chars": 29867,
"preview": "# Publications Using BrainCog\n\n## 2024\n| Papers "
},
{
"path": "documents/Tutorial.md",
"chars": 6747,
"preview": "# Tutorial\n\n\n- How to Install BrainCog [[English Version](http://www.brain-cog.network/docs/tutorial/1_installation.html"
},
{
"path": "examples/Brain_Cognitive_Function_Simulation/drosophila/README.md",
"chars": 2014,
"preview": "# Drosophila-inspired decision-making SNN\n\n## Run\n\nThe drosophila.py implements the core code of the Drosophila-inspired"
},
{
"path": "examples/Brain_Cognitive_Function_Simulation/drosophila/drosophila.py",
"chars": 6939,
"preview": "import numpy as np\r\nimport torch,os,sys\r\nfrom torch import nn\r\nfrom torch.nn import Parameter\r\n\r\nimport abc\r\nimport math"
},
{
"path": "examples/Embodied_Cognition/RHI/RHI_Test.py",
"chars": 17369,
"preview": "import numpy as np\r\nimport torch,os,sys\r\nfrom torch import nn\r\nfrom torch.nn import Parameter \r\nimport abc\r\nimport math\r"
},
{
"path": "examples/Embodied_Cognition/RHI/RHI_Train.py",
"chars": 18848,
"preview": "import numpy as np\r\nimport torch,os,sys\r\nfrom torch import nn\r\nfrom torch.nn import Parameter \r\nimport abc\r\nimport math\r"
},
{
"path": "examples/Embodied_Cognition/RHI/ReadMe.md",
"chars": 1,
"preview": "\n"
},
{
"path": "examples/Hardware_acceleration/README.md",
"chars": 1654,
"preview": "## FireFly: A High-Throughput Hardware Accelerator for Spiking Neural Networks\n\n### Demo of Deploying SNNs on FPGA platf"
},
{
"path": "examples/Hardware_acceleration/firefly_v1_schedule_on_pynq.py",
"chars": 16828,
"preview": "import numpy as np\nimport tqdm\nfrom standalone_utils import *\nimport math\nimport time\nimport ctypes as ct\n\n\nclass FireFl"
},
{
"path": "examples/Hardware_acceleration/standalone_utils.py",
"chars": 10310,
"preview": "import math\n\nimport numpy as np\nfrom einops import rearrange\n\n\ndef get_im2col_indices(x_shape, field_height, field_width"
},
{
"path": "examples/Hardware_acceleration/ultra96_test.py",
"chars": 3143,
"preview": "from standalone_utils import *\nfrom firefly_v1_schedule_on_pynq import *\nfrom pynq import PL\nfrom pynq import Overlay\nfr"
},
{
"path": "examples/Hardware_acceleration/zcu104_test.py",
"chars": 3142,
"preview": "from standalone_utils import *\nfrom firefly_v1_schedule_on_pynq import *\nfrom pynq import PL\nfrom pynq import Overlay\nfr"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/CKRGSNN/README.md",
"chars": 1877,
"preview": "# Commonsense Knowledge Representation SNN\n\n(https://arxiv.org/abs/2207.05561)\n\nThis repository contains code from our p"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/CKRGSNN/main.py",
"chars": 4803,
"preview": "import time\nimport numpy as np\nimport os\nimport warnings\nimport scipy.io as scio\nimport math\nfrom matplotlib import pypl"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/CKRGSNN/sub_Conceptnet.csv",
"chars": 17680,
"preview": "Relation,Head,Tail,Weight\nantonym,ab_extra,ab_intra,1.0\nantonym,ab_intra,ab_extra,1.0\nantonym,abactinal,actinal,1.0\nanto"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/CRSNN/README.md",
"chars": 1835,
"preview": "# Causal Reasoning SNN\n(https://10.1109/IJCNN52387.2021.9534102)\n\nThis repository contains code from our paper [**A Brai"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/CRSNN/main.py",
"chars": 13093,
"preview": "import time\nimport numpy as np\nimport os\nimport warnings\nimport math\nfrom matplotlib import pyplot as plt\nimport torch\nf"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/SPSNN/README.md",
"chars": 1799,
"preview": "# Sequence Production SNN\n\nThis repository contains code from our paper [**Brain Inspired Sequences Production by Spiki"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/SPSNN/main.py",
"chars": 6175,
"preview": "import time\nimport numpy as np\nimport os\nimport warnings\nimport math\nfrom matplotlib import pyplot as plt\nimport torch\nf"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Areas/apac.py",
"chars": 737,
"preview": "'''\nCreated on 2016.7.7\n\n@author: liangqian\n'''\nfrom Modal.note import Note\nfrom Modal.cluster import Cluster\nfrom conf."
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Areas/cortex.py",
"chars": 24554,
"preview": "'''\nCreated on 2016.7.6\n\n@author: liangqian\n'''\n\nfrom Areas.pfc import PFC\nfrom Areas.pac import PAC\nfrom conf.conf impo"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Areas/pac.py",
"chars": 23634,
"preview": "'''\nPrimary Auditory Area\n'''\n\nfrom braincog.base.brainarea.BrainArea import BrainArea\n\nfrom Modal.sequencememory import"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Areas/pfc.py",
"chars": 13418,
"preview": "import numpy as np\nimport math\nfrom braincog.base.brainarea.PFC import PFC\nfrom Modal.synapse import Synapse\nfrom Modal."
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/PAC.py",
"chars": 680,
"preview": "'''\nPrimary Auditory Cortex\n'''\nimport torch\nfrom braincog.base.node.node import *\nfrom braincog.base.brainarea.BrainAre"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/cluster.py",
"chars": 6268,
"preview": "\nfrom .lifneuron import LIFNeuron\nfrom .synapse import Synapse\nfrom Modal.izhikevichneuron import *\n\nclass Cluster():\n "
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/composercluster.py",
"chars": 555,
"preview": "from .cluster import Cluster\nfrom .composerlifneuron import ComposerLIFNeuron\n\n\nclass ComposerCluster(Cluster):\n '''\n"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/composerlayer.py",
"chars": 710,
"preview": "from .layer import Layer\nfrom .composercluster import ComposerCluster\n\n\nclass ComposerLayer(Layer):\n '''\n This"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/composerlifneuron.py",
"chars": 666,
"preview": "from .lifneuron import LIFNeuron\n\n\nclass ComposerLIFNeuron(LIFNeuron):\n '''\n classdocs\n '''\n\n def __init__(s"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/genrecluster.py",
"chars": 539,
"preview": "from .cluster import Cluster\nfrom .genrelifneuron import GenreLIFNeuron\n\nclass GenreCluster(Cluster):\n '''\n classd"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/genrelayer.py",
"chars": 687,
"preview": "from .layer import Layer\nfrom .genrecluster import GenreCluster\nclass GenreLayer(Layer):\n '''\n This layer defi"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/genrelifneuron.py",
"chars": 660,
"preview": "from .lifneuron import LIFNeuron\n\nclass GenreLIFNeuron(LIFNeuron):\n '''\n classdocs\n '''\n\n def __init__(self,"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/izhikevichneuron.py",
"chars": 12241,
"preview": "'''\nCreated on 2016.4.8\n\n@author: liangqian\n'''\n#from modal.izhikevich import Izhikevich\nfrom braincog.base.node import "
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/layer.py",
"chars": 2002,
"preview": "from abc import ABCMeta,abstractmethod\nfrom conf.conf import configs\nfrom Modal.cluster import *\nclass Layer():\n '''\n"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/lifneuron.py",
"chars": 5187,
"preview": "import torch\nimport random\nfrom braincog.base.node import node\nimport numpy as np\nclass LIFNeuron(node.LIFNode):\n\n de"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/note.py",
"chars": 370,
"preview": "'''\nCreated on 2016.7.6\n\n@author: liangqian\n'''\nfrom Modal.pitch import Pitch\nclass Note():\n '''\n Because a chord "
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/notecluster.py",
"chars": 1381,
"preview": "from .cluster import Cluster\nfrom .notelifneuron import NoteLIFNeuron\nfrom Modal.izhikevichneuron import *\n\nclass NoteCl"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/notelifneuron.py",
"chars": 1858,
"preview": "from .lifneuron import LIFNeuron\n\n\nclass NoteLIFNeuron(LIFNeuron):\n '''\n classdocs\n '''\n\n def __init__(self,"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/notesequencelayer.py",
"chars": 1312,
"preview": "from .sequencelayer import SequenceLayer\nfrom .notecluster import NoteCluster\nfrom .synapse import Synapse\n\n\nclass NoteS"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/pitch.py",
"chars": 242,
"preview": "'''\nCreated on 2018.8.29\n\n@author: liangqian\n'''\n\nclass Pitch():\n '''\n classdocs\n '''\n\n\n def __init__(self):"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/sequencelayer.py",
"chars": 1293,
"preview": "from .layer import Layer\nfrom .cluster import Cluster\nfrom .synapse import Synapse\n\n\nclass SequenceLayer(Layer):\n '''"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/sequencememory.py",
"chars": 8739,
"preview": "from .synapse import Synapse\nfrom Modal.sequencelayer import SequenceLayer\nimport numpy as np\nfrom Modal.synapse import "
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/synapse.py",
"chars": 4095,
"preview": "import math\n\nclass Synapse():\n '''\n classdocs\n '''\n\n def __init__(self, pre, post):\n '''\n Cons"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/tempocluster.py",
"chars": 859,
"preview": "from .cluster import Cluster\nfrom .tempolifneuron import TempoLIFNeuron\nfrom Modal.izhikevichneuron import *\n\nclass Temp"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/tempolifneuron.py",
"chars": 2002,
"preview": "from .lifneuron import LIFNeuron\nimport math\n\n\nclass TempoLIFNeuron(LIFNeuron):\n '''\n classdocs\n '''\n\n def _"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/temposequencelayer.py",
"chars": 1273,
"preview": "from .sequencelayer import SequenceLayer\nfrom .tempocluster import TempoCluster\nfrom .synapse import Synapse\n\n\nclass Tem"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/titlecluster.py",
"chars": 1099,
"preview": "from .cluster import Cluster\nfrom .titlelifneuron import TitleLIFNeuron\n\n\nclass TitleCluster(Cluster):\n '''\n class"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/titlelayer.py",
"chars": 648,
"preview": "\n\nfrom .layer import Layer\nfrom .titlecluster import TitleCluster\n\n\nclass TitleLayer(Layer):\n '''\n classdocs\n '"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/Modal/titlelifneuron.py",
"chars": 1518,
"preview": "from .lifneuron import LIFNeuron\nimport math\n\n\nclass TitleLIFNeuron(LIFNeuron):\n '''\n classdocs\n '''\n\n def _"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/README.md",
"chars": 2847,
"preview": "# Music Memory and stylistic composition\n\nThis repository contains code from our paper:\n- [**Temporal-Sequential Learnin"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/api/music_engine_api.py",
"chars": 14023,
"preview": "\n\nfrom conf.conf import *\nfrom Areas.cortex import Cortex\nimport pretty_midi\nimport math\nimport json\nimport music21 as m"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/conf/GenreData.txt",
"chars": 244,
"preview": "Baroque:Bach\nClassical:Haydn,Mozart,Beethoven,Schubert,Clementi\nRomantic:Mendelssohn,Liszt,Chopin,Schumann,Brahms,Burgmu"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/conf/MIDIData.txt",
"chars": 1501,
"preview": "-1:rest\n0:C3\n1:C sharp3/D flat3\n2:D3\n3:D sharp3/E flat3\n4:E3\n5:F3\n6:F sharp3/G flat3\n7:G3\n8:G sharp3/A flat3\n9:A3\n10:A s"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/conf/conf.py",
"chars": 4261,
"preview": "import numpy\nimport numpy as np\nimport pandas as pd\nclass Conf():\n '''\n classdocs\n '''\n\n def __init__(self, "
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/inputs/1.txt",
"chars": 707,
"preview": "1:A-B2\n2:A#-B2\n3:B-B2\n4:C-B1\n5:C#-B1\n6:D-B1\n7:D#-B1\n8:E-B1\n9:F-B1\n10:F#-B1\n11:G-B1\n12:G#-B1\n13:A-B1\n14:A#-B1\n15:B-B1\n16:"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/inputs/Data.txt",
"chars": 530,
"preview": "1:A2\n2:A#2\n3:B2\n4:C1\n5:C#1\n6:D1\n7:D#1\n8:E1\n9:F1\n10:F#1\n11:G1\n12:G#1\n13:A1\n14:A#1\n15:B1\n16:C\n17:C#\n18:D\n19:D#\n20:E\n21:F\n2"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/inputs/GenreData.txt",
"chars": 244,
"preview": "Baroque:Bach\nClassical:Haydn,Mozart,Beethoven,Schubert,Clementi\nRomantic:Mendelssohn,Liszt,Chopin,Schumann,Brahms,Burgmu"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/inputs/MIDIData.txt",
"chars": 1501,
"preview": "-1:rest\n0:C3\n1:C sharp3/D flat3\n2:D3\n3:D sharp3/E flat3\n4:E3\n5:F3\n6:F sharp3/G flat3\n7:G3\n8:G sharp3/A flat3\n9:A3\n10:A s"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/inputs/chords.csv",
"chars": 150,
"preview": "C,,,\r\n1,C,E,G\r\n4,F,A,C\r\n5,G,B,D\r\n,,,\r\na,,,\r\n1,A,C,E\r\n4,D,F,A\r\n5,E,G#,B\r\n,,,\r\nG,,,\r\n1,G,B,D\r\n4,C,E,G\r\n5,D,F#,A\r\n,,,\r\nF,,,"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/inputs/information.csv",
"chars": 13697,
"preview": "chpn_op35_2.mid,Chopin,Romantic,unclear\nchpn_op33_4.mid,Chopin,Romantic,unclear\nchpn-p14.mid,Chopin,Romantic,unclear\nchp"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/inputs/keyIndex.csv",
"chars": 336,
"preview": "C major,0\na minor,1\nG major,2\ne minor,3\nD major,4\nb minor,5\nA major,6\nf# minor,7\nE major,8\nc# minor,9\nB major,10\ng# mino"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/inputs/keys.csv",
"chars": 696,
"preview": "1,-1,2,-1,3,4,-1,5,-1,6,-1,7\n3,-1,4,-1,5,6,-1,-1,7,1,-1,2\n4,-1,5,-1,6,-1,7,1,-1,2,-1,3\n6,-1,-1,7,1,-1,2,3,-1,4,-1,5\n-1,7"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/inputs/modeindex.csv",
"chars": 16,
"preview": "0,major\n1,minor\n"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/inputs/pitch2midi.csv",
"chars": 788,
"preview": "C,0,12,24,36,48,60,72,84,96,108,120\r\nC#,1,13,25,37,49,61,73,85,97,109,121\r\nC-,11,23,35,47,59,71,83,95,107,119,\r\nD,2,14,2"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/inputs/tones2.csv",
"chars": 872,
"preview": ",0,1,2,3,4,5,6,7,8,9,10,11\r\nC major,2,-1,1,-1,1,1,-1,1,-1,1,-1,1\r\na minor,1,-1,1,-1,1,1,-1,-1,2,2,-1,1\r\nG major,1,-1,1,-"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/task/mode-conditioned learning.py",
"chars": 1637,
"preview": "import sys\nimport os\nimport time\nsys.path.append(\"../../../../\")\nsys.path.append(\"../\")\nimport numpy as np\nimport music2"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/task/musicGeneration.py",
"chars": 1446,
"preview": "import sys\nsys.path.append(\"../../../../\")\nsys.path.append(\"../\")\nfrom api.music_engine_api import EngineAPI\nimport os\n\n"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/task/musicMemory.py",
"chars": 1007,
"preview": "import sys\nsys.path.append(\"../\")\nsys.path.append(\"../../../../\")\nfrom api.music_engine_api import EngineAPI\nimport os\n\n"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/tools/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/tools/generateData.py",
"chars": 1691,
"preview": "'''\nCreated on 2016.4.27\n\n@author: liangqian\n'''\nimport json\nimport random\n\nclass joint():\n def __init__(self):\n "
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/tools/hamonydataset_test.py",
"chars": 910,
"preview": "import os\nimport sys\nimport music21 as m21\nimport numpy as np\nfirstnotes = np.array([[m21.pitch.Pitch('b-4').midi],\n "
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/tools/msg.py",
"chars": 754,
"preview": "'''\nCreated on 2016.6.8\n\n@author: liangqian\n'''\nimport time\nimport sys\nimport stomp\nclass MyListener(object):\n def on"
},
{
"path": "examples/Knowledge_Representation_and_Reasoning/musicMemory/tools/msgq.py",
"chars": 488,
"preview": "'''\nCreated on 2018.9.7\n\n@author: liangqian\n'''\nimport time\nimport sys\nimport stomp\n\ndef createMSQ():\n queue_name = '"
}
]
// ... and 485 more files (download for full content)
About this extraction
This page contains the full source code of the BrainCog-X/Brain-Cog GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 685 files (4.5 MB), approximately 1.2M tokens, and a symbol index with 5174 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.