Showing preview only (5,775K chars total). Download the full file or copy to clipboard to get everything.
Repository: DES-Lab/AALpy
Branch: master
Commit: 1edacf63c4b3
Files: 225
Total size: 29.8 MB
Directory structure:
gitextract_arfvw_hu/
├── .gitattributes
├── .github/
│ └── workflows/
│ ├── codeql-analysis.yml
│ └── python-app.yml
├── .gitignore
├── Benchmarking/
│ ├── Benchmark_ErrorStop.py
│ ├── CompleteStochasticBenchmarking.py
│ ├── StochasticAlgComparison.py
│ ├── StochasticBenchmarkingWPrism.py
│ ├── StopWithErorrRate.py
│ ├── all_results.pickle
│ ├── benchmark.py
│ ├── benchmark_alphabet_increase.py
│ ├── benchmark_size_increase.py
│ ├── cex_processing_benchmark.py
│ ├── compare_lstar_and_kv.py
│ ├── error_benchmark_statistics.py
│ ├── evaluate_l_star_configurations.py
│ ├── fm_benchmark.py
│ ├── fm_plots.py
│ ├── generate_plots.py
│ ├── json_lbt.py
│ ├── papni_sequences.pickle
│ ├── papni_vs_rpni_benchmarking.py
│ ├── passive_mdp_vs_smm.py
│ ├── prism_eval_props/
│ │ ├── bluetooth.props
│ │ ├── emqtt_two_client.props
│ │ ├── first_eval.props
│ │ ├── second_eval.props
│ │ ├── shared_coin_eval.props
│ │ ├── slot_machine_eval.props
│ │ └── tcp_eval.props
│ ├── rpni_papni_memory_footrpint.py
│ ├── stochastic_benchmarking/
│ │ ├── Benchmark_ErrorStop.py
│ │ ├── CompleteStochasticBenchmarking.py
│ │ ├── StochasticBenchmarkingWPrism.py
│ │ ├── passive_mdp_vs_smm.py
│ │ ├── plot_error_steps.py
│ │ ├── stochastic_benchmark_random_automata.py
│ │ ├── strategy_comp.py
│ │ └── unamb_error_plot.py
│ ├── unamb_error_plot.py
│ └── vpa_benchmarking/
│ └── benchmark_vpa.py
├── DotModels/
│ ├── Angluin_Mealy.dot
│ ├── Angluin_Moore.dot
│ ├── Bluetooth/
│ │ ├── CC2640R2-no-feature-req.dot
│ │ ├── CC2640R2-no-feature-req_stochastic.dot
│ │ ├── CC2640R2-no-pairing-req.dot
│ │ ├── CC2650.dot
│ │ ├── CYBLE-416045-02.dot
│ │ ├── CYBLE-416045-02_Crash_No_Response_stochastic.dot
│ │ ├── CYW43455.dot
│ │ ├── CYW43455_stochastic.dot
│ │ ├── bluetooth_model.dot
│ │ ├── bluetooth_reduced.dot
│ │ ├── cc2652r1.dot
│ │ ├── convert_to_stochastic.py
│ │ └── nRF52832.dot
│ ├── MDPs/
│ │ ├── bluetooth.dot
│ │ ├── faulty_car_alarm.dot
│ │ ├── first_grid.dot
│ │ ├── mqtt.dot
│ │ ├── second_grid.dot
│ │ ├── shared_coin.dot
│ │ ├── slot_machine.dot
│ │ └── tcp.dot
│ ├── MQTT/
│ │ ├── ActiveMQ__two_client_will_retain.dot
│ │ ├── VerneMQ__two_client_will_retain.dot
│ │ ├── emqtt__two_client_will_retain.dot
│ │ ├── hbmqtt__two_client_will_retain.dot
│ │ └── mosquitto__two_client_will_retain.dot
│ ├── SimpleABC/
│ │ ├── simple_abc_dfa.dot
│ │ ├── simple_abc_mealy.dot
│ │ └── simple_abc_moore.dot
│ ├── TCP/
│ │ ├── TCP_Linux_Client.dot
│ │ ├── tcp_server_bsd_trans.dot
│ │ ├── tcp_server_ubuntu_trans.dot
│ │ └── tcp_server_windows_trans.dot
│ ├── TLS/
│ │ ├── JSSE_1.8.0_25_server_regular.dot
│ │ ├── NSS_3.17.4_server_regular.dot
│ │ ├── OpenSSL_1.0.2_server_regular.dot
│ │ ├── RSA_BSAFE_C_4.0.4_server_regular.dot
│ │ └── miTLS_0.1.3_server_regular.dot
│ ├── arithmetics.dot
│ ├── car_alarm.dot
│ ├── coffee_mealy.dot
│ ├── coffee_moore.dot
│ ├── five_clients_mqtt_abstracted_onfsm.dot
│ ├── mooreModel.dot
│ ├── onfsm_0.dot
│ ├── onfsm_1.dot
│ ├── onfsm_2.dot
│ ├── onfsm_3.dot
│ ├── onfsm_4.dot
│ ├── onfsm_5.dot
│ └── tomitaGrammars/
│ ├── tomita_1.dot
│ ├── tomita_2.dot
│ ├── tomita_3.dot
│ ├── tomita_4.dot
│ ├── tomita_5.dot
│ ├── tomita_6.dot
│ └── tomita_7.dot
├── Examples.py
├── LICENCE.txt
├── README.md
├── aalpy/
│ ├── SULs/
│ │ ├── AutomataSUL.py
│ │ ├── PyMethodSUL.py
│ │ ├── RegexSUL.py
│ │ ├── TomitaSUL.py
│ │ └── __init__.py
│ ├── __init__.py
│ ├── automata/
│ │ ├── Dfa.py
│ │ ├── MarkovChain.py
│ │ ├── Mdp.py
│ │ ├── MealyMachine.py
│ │ ├── MooreMachine.py
│ │ ├── NonDeterministicMooreMachine.py
│ │ ├── Onfsm.py
│ │ ├── Sevpa.py
│ │ ├── StochasticMealyMachine.py
│ │ ├── Vpa.py
│ │ └── __init__.py
│ ├── base/
│ │ ├── Automaton.py
│ │ ├── CacheTree.py
│ │ ├── Oracle.py
│ │ ├── SUL.py
│ │ └── __init__.py
│ ├── learning_algs/
│ │ ├── __init__.py
│ │ ├── adaptive/
│ │ │ ├── AdaptiveLSharp.py
│ │ │ ├── AdaptiveObservationTree.py
│ │ │ ├── StateMatching.py
│ │ │ └── __init__.py
│ │ ├── deterministic/
│ │ │ ├── ADS.py
│ │ │ ├── Apartness.py
│ │ │ ├── ClassificationTree.py
│ │ │ ├── CounterExampleProcessing.py
│ │ │ ├── KV.py
│ │ │ ├── LSharp.py
│ │ │ ├── LStar.py
│ │ │ ├── ObservationTable.py
│ │ │ ├── ObservationTree.py
│ │ │ └── __init__.py
│ │ ├── deterministic_passive/
│ │ │ ├── ClassicRPNI.py
│ │ │ ├── GsmRPNI.py
│ │ │ ├── PAPNI.py
│ │ │ ├── RPNI.py
│ │ │ ├── __init__.py
│ │ │ ├── active_RPNI.py
│ │ │ └── rpni_helper_functions.py
│ │ ├── general_passive/
│ │ │ ├── GeneralizedStateMerging.py
│ │ │ ├── GsmAlgorithms.py
│ │ │ ├── GsmNode.py
│ │ │ ├── Instrumentation.py
│ │ │ ├── ScoreFunctionsGSM.py
│ │ │ └── __init__.py
│ │ ├── non_deterministic/
│ │ │ ├── AbstractedOnfsmLstar.py
│ │ │ ├── AbstractedOnfsmObservationTable.py
│ │ │ ├── NonDeterministicSULWrapper.py
│ │ │ ├── OnfsmLstar.py
│ │ │ ├── OnfsmObservationTable.py
│ │ │ ├── TraceTree.py
│ │ │ └── __init__.py
│ │ ├── stochastic/
│ │ │ ├── DifferenceChecker.py
│ │ │ ├── SamplingBasedObservationTable.py
│ │ │ ├── StochasticCexProcessing.py
│ │ │ ├── StochasticLStar.py
│ │ │ ├── StochasticTeacher.py
│ │ │ └── __init__.py
│ │ └── stochastic_passive/
│ │ ├── ActiveAleriga.py
│ │ ├── Alergia.py
│ │ ├── CompatibilityChecker.py
│ │ ├── FPTA.py
│ │ └── __init__.py
│ ├── oracles/
│ │ ├── BreadthFirstExplorationEqOracle.py
│ │ ├── CacheBasedEqOracle.py
│ │ ├── KWayStateCoverageEqOracle.py
│ │ ├── KWayTransitionCoverageEqOracle.py
│ │ ├── PacOracle.py
│ │ ├── PerfectKnowledgeEqOracle.py
│ │ ├── ProvidedSequencesOracleWrapper.py
│ │ ├── RandomWalkEqOracle.py
│ │ ├── RandomWordEqOracle.py
│ │ ├── StatePrefixEqOracle.py
│ │ ├── TransitionFocusOracle.py
│ │ ├── UserInputEqOracle.py
│ │ ├── WMethodEqOracle.py
│ │ ├── WpMethodEqOracle.py
│ │ └── __init__.py
│ ├── paths.py
│ └── utils/
│ ├── AutomatonGenerators.py
│ ├── BenchmarkSULs.py
│ ├── BenchmarkSevpaModels.py
│ ├── BenchmarkVpaModels.py
│ ├── DataHandler.py
│ ├── FileHandler.py
│ ├── HelperFunctions.py
│ ├── ModelChecking.py
│ ├── Sampling.py
│ └── __init__.py
├── docs/
│ ├── README.md
│ ├── _config.yml
│ ├── google306875680a34d740.html
│ └── instructions.txt
├── jAlergia/
│ ├── alergia.jar
│ └── exampleMdpData.txt
├── notebooks/
│ ├── Abstracted_Non-Det_FSM.ipynb
│ ├── AngluinExample.ipynb
│ ├── MDP_Example.ipynb
│ ├── MDP_and_SMM_Example.ipynb
│ ├── ONFSM_Example.ipynb
│ ├── RandomMealyExample.ipynb
│ ├── RegexExample.ipynb
│ └── Stochstic_Examples.ipynb
├── setup.py
└── tests/
├── oracles/
│ ├── test_baseOracle.py
│ └── test_kWayTransitionCoverageEqOracle.py
├── test_charSet.py
├── test_deterministic.py
├── test_deterministic_passive.py
├── test_file_operations.py
├── test_non_deterministic.py
├── test_rwpmethod_oracle.py
├── test_stochastic.py
├── test_wmethod_oracle.py
├── test_wpmethod_oracle.py
└── tests_imports.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitattributes
================================================
* linguist-vendored
*.py linguist-vendored=false
================================================
FILE: .github/workflows/codeql-analysis.yml
================================================
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ master ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master ]
schedule:
- cron: '17 10 * * 6'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# ℹ️ Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
================================================
FILE: .github/workflows/python-app.yml
================================================
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
name: Python application
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.9
uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install flake8 pytest
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
main.py
LearnedModel.pdf
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
.vscode/settings.json
# PyCharm
.idea/
================================================
FILE: Benchmarking/Benchmark_ErrorStop.py
================================================
import random
import os

import aalpy.paths
from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_stochastic_Lstar
from aalpy.oracles.RandomWordEqOracle import UnseenOutputRandomWordEqOracle
from aalpy.utils import load_automaton_from_file, get_properties_file, get_correct_prop_values
from aalpy.utils import model_check_experiment
from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion

# Benchmark script: for each configuration (strategy x cex sampling x cex processing)
# and each seeded repetition, learn both an MDP and an SMM model of the same system
# with property-based stopping, model-check both against reference property values,
# and append the learning / model-checking statistics to a per-seed CSV file.

# Fixed seeds so each experiment repetition is reproducible.
seeds = [1212, 4557, 19059, 468, 43, 654, 235345, 6546, 76768, 4563, 543526, 777676,
         5555, 776767, 87878787, 98989, 60967553, 3866677, 1555841, 8638]

path_to_dir = '../DotModels/MDPs/'
files = ['first_grid.dot', 'second_grid.dot']  # 'slot_machine.dot' ,'shared_coin.dot' 'mqtt.dot', 'tcp.dot'
prop_folder = 'prism_eval_props/'

# TODO Change the path to your PRISM executable and change the path_to_prism in the
# stop_based_on_confidence method in ModelChecking.py.
# NOTE(review): prism_executable is never read below; aalpy.paths.path_to_prism is
# what the model checker actually uses — confirm before relying on either path.
prism_executable = "/home/mtappler/Programs/prism-4.4-linux64/bin/prism"
aalpy.paths.path_to_prism = "C:/Program Files/prism-4.6/bin/prism.bat"
aalpy.paths.path_to_properties = "prism_eval_props/"

# Default learning parameters; kept as-is when uniform_parameters is True.
n_c = 20
n_resample = 1000
min_rounds = 10
max_rounds = 300
experiment_repetition = 5
uniform_parameters = False

strategy = ["normal"]  # chi_square
cex_sampling = [None]  # random:100:0.15
cex_processing = [None]  # add a single prefix

# Per-model (n_c, n_resample) pairs, applied when uniform_parameters is False.
# (The original code expressed this as an if/elif chain; its uniform branch
# consisted solely of self-assignments, i.e. a no-op, and is therefore dropped.)
per_model_parameters = {
    'first_grid': (20, 1000),
    'second_grid': (20, 2000),
    'shared_coin': (25, 2500),
    'slot_machine': (30, 5000),
    'mqtt': (20, 1000),
    'tcp': (20, 1000),
}

for strat in strategy:
    for cex_stat in cex_sampling:
        for cex_proc in cex_processing:
            print(strat, cex_stat, cex_proc)
            benchmark_dir = f'FM_mdp_smm_error_based_stop/benchmark_{strat}_{cex_stat}_{cex_proc}/'
            # exist_ok avoids the exists()/makedirs() race of the original.
            os.makedirs(benchmark_dir, exist_ok=True)
            for seed in range(experiment_repetition):
                print(seed)
                random.seed(seeds[seed])
                # Context manager guarantees the CSV is closed even if a run raises.
                with open(f"{benchmark_dir}/exp_{seed}.csv", "w") as text_file:
                    for file in files:
                        print(file)
                        exp_name = file.split('.')[0]
                        if not uniform_parameters:
                            # Fall back to the current values for unknown models.
                            n_c, n_resample = per_model_parameters.get(exp_name, (n_c, n_resample))

                        # Stop learning once every property estimate is within 2% of its true value.
                        stopping_data = (get_properties_file(exp_name), get_correct_prop_values(exp_name), 0.02)

                        original_mdp = load_automaton_from_file(path_to_dir + file, automaton_type='mdp')
                        input_alphabet = original_mdp.get_input_alphabet()

                        # --- Active learning of an MDP model ---
                        mdp_sul = AutomatonSUL(original_mdp)
                        eq_oracle = UnseenOutputRandomWordEqOracle(input_alphabet, mdp_sul, num_walks=150, min_walk_len=5,
                                                                   max_walk_len=15, reset_after_cex=True)
                        learned_mdp, data_mdp = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='mdp',
                                                                     n_c=n_c, n_resample=n_resample, min_rounds=min_rounds,
                                                                     strategy=strat, max_rounds=max_rounds, return_data=True,
                                                                     samples_cex_strategy=cex_stat, print_level=1,
                                                                     cex_processing=cex_proc,
                                                                     property_based_stopping=stopping_data)

                        del mdp_sul
                        del eq_oracle

                        # --- Active learning of an SMM model, re-seeded identically so
                        # both runs observe the same random behavior of the SUL ---
                        random.seed(seeds[seed])
                        mdp_sul = AutomatonSUL(original_mdp)
                        eq_oracle = UnseenOutputRandomWordEqOracle(input_alphabet, mdp_sul, num_walks=150, min_walk_len=5,
                                                                   max_walk_len=15, reset_after_cex=True)
                        learned_smm, data_smm = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='smm',
                                                                     n_c=n_c, n_resample=n_resample, min_rounds=min_rounds,
                                                                     strategy=strat, max_rounds=max_rounds, return_data=True,
                                                                     samples_cex_strategy=cex_stat, print_level=1,
                                                                     cex_processing=cex_proc,
                                                                     property_based_stopping=stopping_data)

                        # Convert the SMM to an MDP so both models can be checked
                        # against the same PRISM properties.
                        smm_2_mdp = smm_to_mdp_conversion(learned_smm)

                        mdp_results, mdp_err = model_check_experiment(get_properties_file(exp_name),
                                                                      get_correct_prop_values(exp_name), learned_mdp)
                        smm_results, smm_err = model_check_experiment(get_properties_file(exp_name),
                                                                      get_correct_prop_values(exp_name), smm_2_mdp)

                        properties_string_header = ",".join([f'{key}_val,{key}_err' for key in mdp_results.keys()])
                        property_string_mdp = ",".join([f'{str(mdp_results[p])},{str(mdp_err[p])}' for p in mdp_results.keys()])
                        property_string_smm = ",".join([f'{str(smm_results[p])},{str(smm_err[p])}' for p in smm_results.keys()])

                        # NOTE(review): the header row is (re)written once per model,
                        # mirroring the original script's CSV layout exactly.
                        text_file.write('Exp_Name, n_c, n_resample, Final Hypothesis Size, Learning time,'
                                        'Eq. Query Time, Learning Rounds, #MQ Learning, # Steps Learning,'
                                        f'# MQ Eq.Queries, # Steps Eq.Queries , {properties_string_header}\n')
                        text_file.write(f'learned_mdp_{exp_name},{n_c},{n_resample}, {data_mdp["automaton_size"]}, '
                                        f'{data_mdp["learning_time"]}, {data_mdp["eq_oracle_time"]}, '
                                        f'{data_mdp["learning_rounds"]}, {data_mdp["queries_learning"]}, {data_mdp["steps_learning"]},'
                                        f'{data_mdp["queries_eq_oracle"]}, {data_mdp["steps_eq_oracle"]},'
                                        f'{property_string_mdp}\n')
                        text_file.write(f'learned_smm_{exp_name},{n_c},{n_resample}, {data_smm["automaton_size"]}, '
                                        f'{data_smm["learning_time"]}, {data_smm["eq_oracle_time"]}, '
                                        f'{data_smm["learning_rounds"]}, {data_smm["queries_learning"]}, {data_smm["steps_learning"]},'
                                        f'{data_smm["queries_eq_oracle"]}, {data_smm["steps_eq_oracle"]},'
                                        f'{property_string_smm}\n')
                        text_file.flush()
================================================
FILE: Benchmarking/CompleteStochasticBenchmarking.py
================================================
import os
import random
import time

import aalpy.paths
from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_stochastic_Lstar
from aalpy.oracles.RandomWordEqOracle import RandomWordEqOracle
from aalpy.utils import load_automaton_from_file, get_properties_file, get_correct_prop_values
from aalpy.utils import model_check_experiment
from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion

# Benchmark script: sweep strategy x cex-sampling x cex-processing configurations,
# learn both MDP and SMM models per configuration/seed/model with a 0.99
# target-unambiguity stopping criterion, model-check the results with PRISM, and
# write statistics to one CSV per seed.

# Fixed seeds so each experiment repetition is reproducible.
seeds = [29334, 1554, 9430459, 92344168, 55451679, 569315, 7776892, 3875261, 811, 51,
         766603, 778438967, 9819877, 6755560, 52903, 5257, 4635, 358, 1441, 838]

path_to_dir = '../DotModels/MDPs/'
# files = ['first_grid.dot', 'second_grid.dot', 'slot_machine.dot', 'mqtt.dot', 'tcp.dot'] # 'slot_machine.dot' ,'shared_coin.dot'
files = ['second_grid.dot', 'mqtt.dot']  # 'slot_machine.dot' ,'shared_coin.dot'
prop_folder = 'prism_eval_props/'

# TODO Change the path to your PRISM executable
aalpy.paths.path_to_prism = "C:/Program Files/prism-4.6/bin/prism.bat"
aalpy.paths.path_to_properties = "prism_eval_props/"

# NOTE(review): n_c and n_resample are written into the CSV below, but this script
# never passes them to run_stochastic_Lstar — the learner uses its own defaults.
n_c = 10
n_resample = 1000
min_rounds = 25
max_rounds = 500
experiment_repetition = 10
uniform_parameters = False  # unused in this script; kept for parity with sibling benchmarks

strategy = ["normal", "chi2"]  # chi_square
cex_sampling = ['bfs', ]  # random:100:0.15
cex_processing = [None, 'longest_prefix']  # add a single prefix

start = time.time()

# Load every benchmark model once up front and reuse it across all configurations.
model_dict = {m.split('.')[0]: load_automaton_from_file(path_to_dir + m, automaton_type='mdp') for m in files}

for strat in strategy:
    for cex_stat in cex_sampling:
        for cex_proc in cex_processing:
            print(strat, cex_stat, cex_proc)
            benchmark_dir = f'FM_mdp_smm/benchmark_22_04_{strat}_{cex_proc}/'
            # exist_ok avoids re-checking existence on every seed iteration.
            os.makedirs(benchmark_dir, exist_ok=True)
            for seed in range(experiment_repetition):
                print(seed)
                random.seed(seeds[seed])
                # Context manager guarantees the CSV is closed even if a run raises.
                with open(f"{benchmark_dir}/exp_{seed}.csv", "w") as text_file:
                    for file in files:
                        print(file)
                        exp_name = file.split('.')[0]
                        original_mdp = model_dict[exp_name]
                        input_alphabet = original_mdp.get_input_alphabet()

                        # --- Active learning of an MDP model ---
                        mdp_sul = AutomatonSUL(original_mdp)
                        eq_oracle = RandomWordEqOracle(input_alphabet, mdp_sul, num_walks=150, min_walk_len=5,
                                                       max_walk_len=16, reset_after_cex=True)
                        learned_mdp, data_mdp = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='mdp',
                                                                     min_rounds=min_rounds, strategy=strat,
                                                                     max_rounds=max_rounds, return_data=True,
                                                                     samples_cex_strategy=cex_stat, print_level=1,
                                                                     cex_processing=cex_proc, target_unambiguity=0.99)

                        del mdp_sul
                        del eq_oracle

                        # --- Active learning of an SMM model, re-seeded identically ---
                        # NOTE(review): max_walk_len is 15 here vs 16 in the MDP run,
                        # exactly as in the original script — confirm whether intended.
                        random.seed(seeds[seed])
                        mdp_sul = AutomatonSUL(original_mdp)
                        eq_oracle = RandomWordEqOracle(input_alphabet, mdp_sul, num_walks=150, min_walk_len=5,
                                                       max_walk_len=15, reset_after_cex=True)
                        learned_smm, data_smm = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='smm',
                                                                     min_rounds=min_rounds, strategy=strat,
                                                                     max_rounds=max_rounds, return_data=True,
                                                                     samples_cex_strategy=cex_stat, print_level=1,
                                                                     cex_processing=cex_proc, target_unambiguity=0.99)

                        # Convert the SMM to an MDP so both models can be checked
                        # against the same PRISM properties.
                        smm_2_mdp = smm_to_mdp_conversion(learned_smm)

                        mdp_results, mdp_err = model_check_experiment(get_properties_file(exp_name),
                                                                      get_correct_prop_values(exp_name), learned_mdp)
                        smm_results, smm_err = model_check_experiment(get_properties_file(exp_name),
                                                                      get_correct_prop_values(exp_name), smm_2_mdp)

                        properties_string_header = ",".join([f'{key}_val,{key}_err' for key in mdp_results.keys()])
                        property_string_mdp = ",".join([f'{str(mdp_results[p])},{str(mdp_err[p])}' for p in mdp_results.keys()])
                        property_string_smm = ",".join([f'{str(smm_results[p])},{str(smm_err[p])}' for p in smm_results.keys()])

                        # Header row is (re)written per model, mirroring the original CSV layout.
                        text_file.write('Exp_Name,n_c,n_resample,Final Hypothesis Size,Learning time,'
                                        'Eq. Query Time,Learning Rounds,#MQ Learning,# Steps Learning,'
                                        f'# MQ Eq.Queries,# Steps Eq.Queries ,{properties_string_header}\n')
                        text_file.write(f'learned_mdp_{exp_name},{n_c},{n_resample},{data_mdp["automaton_size"]},'
                                        f'{data_mdp["learning_time"]},{data_mdp["eq_oracle_time"]},'
                                        f'{data_mdp["learning_rounds"]},{data_mdp["queries_learning"]},{data_mdp["steps_learning"]},'
                                        f'{data_mdp["queries_eq_oracle"]},{data_mdp["steps_eq_oracle"]},'
                                        f'{property_string_mdp}\n')
                        text_file.write(f'learned_smm_{exp_name},{n_c},{n_resample},{data_smm["automaton_size"]},'
                                        f'{data_smm["learning_time"]},{data_smm["eq_oracle_time"]}, '
                                        f'{data_smm["learning_rounds"]},{data_smm["queries_learning"]},{data_smm["steps_learning"]},'
                                        f'{data_smm["queries_eq_oracle"]},{data_smm["steps_eq_oracle"]},'
                                        f'{property_string_smm}\n')
                        text_file.flush()

print('Exp duration', time.time() - start)
================================================
FILE: Benchmarking/StochasticAlgComparison.py
================================================
import random
import time
from statistics import mean

import aalpy.paths
from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_stochastic_Lstar, run_Alergia
from aalpy.oracles.RandomWordEqOracle import RandomWordEqOracle
from aalpy.utils import load_automaton_from_file, get_properties_file, get_correct_prop_values
from aalpy.utils import model_check_experiment
from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion

# Compares three stochastic learning approaches on the same benchmark models:
#   1. 'classic' active MDP learning,
#   2. active SMM learning (converted to an MDP for model checking),
#   3. passive Alergia learning from random traces, given the same number of
#      traces as the costlier of the two active runs.
# For each model the mean model-checking error and trace budget are printed.

path_to_dir = '../DotModels/MDPs/'
files = ['first_grid.dot', 'second_grid.dot', 'slot_machine.dot', 'mqtt.dot', 'tcp.dot', 'bluetooth.dot']  #
prop_folder = 'prism_eval_props/'

aalpy.paths.path_to_prism = "C:/Program Files/prism-4.6/bin/prism.bat"
aalpy.paths.path_to_properties = "prism_eval_props/"

# Load every benchmark model once up front.
model_dict = {m.split('.')[0]: load_automaton_from_file(path_to_dir + m, automaton_type='mdp') for m in files}

for file in files:
    print(file)
    exp_name = file.split('.')[0]

    print('--------------------------------------------------')
    print('Experiment:', exp_name)

    original_mdp = model_dict[exp_name]
    input_alphabet = original_mdp.get_input_alphabet()

    # --- 1. Classic active MDP learning ---
    mdp_sul = AutomatonSUL(original_mdp)
    eq_oracle = RandomWordEqOracle(input_alphabet, mdp_sul, num_walks=500, min_walk_len=5,
                                   max_walk_len=16, reset_after_cex=True)
    learned_classic_mdp, data_mdp = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='mdp',
                                                         min_rounds=10, strategy='classic', n_c=20, n_resample=2000,
                                                         stopping_range_dict={},
                                                         max_rounds=200, return_data=True, target_unambiguity=0.98,
                                                         print_level=1)
    del mdp_sul
    del eq_oracle

    # --- 2. Active SMM learning on a fresh SUL ---
    mdp_sul = AutomatonSUL(original_mdp)
    eq_oracle = RandomWordEqOracle(input_alphabet, mdp_sul, num_walks=150, min_walk_len=5,
                                   max_walk_len=15, reset_after_cex=True)
    learned_smm, data_smm = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='smm',
                                                 min_rounds=10, strategy='normal',
                                                 max_rounds=200, return_data=True, target_unambiguity=0.98,
                                                 print_level=1)

    # Convert the SMM to an MDP so all models are checked with the same properties.
    smm_2_mdp = smm_to_mdp_conversion(learned_smm)

    mdp_results, mdp_err = model_check_experiment(get_properties_file(exp_name),
                                                  get_correct_prop_values(exp_name), learned_classic_mdp)
    smm_results, smm_err = model_check_experiment(get_properties_file(exp_name),
                                                  get_correct_prop_values(exp_name), smm_2_mdp)

    # --- 3. Passive Alergia learning with a matched trace budget ---
    # (max over the two operands directly; the original built a throwaway list.)
    num_alergia_samples = max(data_mdp["queries_learning"] + data_mdp["queries_eq_oracle"],
                              data_smm["queries_learning"] + data_smm["queries_eq_oracle"])

    # Each trace is [initial_output, (input, output), ...]; pre() resets the SUL
    # before every trace. The SMM run's SUL is reused here, as in the original.
    alergia_samples = []
    for _ in range(num_alergia_samples):
        sample = [mdp_sul.pre()]
        for _ in range(random.randint(10, 30)):
            action = random.choice(input_alphabet)
            output = mdp_sul.step(action)
            sample.append((action, output))
        alergia_samples.append(sample)

    alergia_model = run_Alergia(alergia_samples, automaton_type='mdp')
    alergia_results, alergia_error = model_check_experiment(get_properties_file(exp_name),
                                                            get_correct_prop_values(exp_name), alergia_model)

    print('Classic MDP learning', mean(mdp_err.values()), mdp_err)
    print('SMM learning', mean(smm_err.values()), smm_err)
    print('Alergia learning', mean(alergia_error.values()), alergia_error)

    print('Classic MDP traces', data_mdp["queries_learning"] + data_mdp["queries_eq_oracle"])
    print('SMM learning traces', data_smm["queries_learning"] + data_smm["queries_eq_oracle"])
================================================
FILE: Benchmarking/StochasticBenchmarkingWPrism.py
================================================
import os
import random

import aalpy.paths
from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_stochastic_Lstar
from aalpy.oracles.RandomWalkEqOracle import RandomWalkEqOracle
from aalpy.utils import load_automaton_from_file, get_correct_prop_values, get_properties_file
from aalpy.utils import model_check_experiment
from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion

# Benchmark script: for seeds 1..3, learn an MDP and an SMM model of each
# benchmark system with a random-walk equivalence oracle, model-check both with
# PRISM, and append the statistics to a per-seed CSV file.

path_to_dir = '../DotModels/MDPs/'
files = ['first_grid.dot', 'second_grid.dot',
         'slot_machine.dot', 'mqtt.dot', 'tcp.dot']  # 'shared_coin.dot'

aalpy.paths.path_to_prism = "C:/Program Files/prism-4.6/bin/prism.bat"
aalpy.paths.path_to_properties = "prism_eval_props/"

# Learning parameters; kept as-is when uniform_parameters is True.
n_c = 20
n_resample = 1000
min_rounds = 10
max_rounds = 1000
strategy = "normal"

# Per-model (n_c, n_resample) pairs, applied only when uniform_parameters is False.
# (The original code expressed this as an if/elif chain; its uniform branch
# consisted solely of self-assignments, i.e. a no-op, and is therefore dropped.)
per_model_parameters = {
    'first_grid': (20, 1000),
    'second_grid': (20, 1000),
    'shared_coin': (30, 3000),
    'slot_machine': (40, 5000),
    'mqtt': (20, 1000),
    'tcp': (20, 1000),
}

for seed in range(1, 4):
    random.seed(seed)
    benchmark_dir = f"benchmark_complete_no_cq/benchmark_data_{seed}"
    # exist_ok avoids the exists()/makedirs() race of the original.
    os.makedirs(benchmark_dir, exist_ok=True)
    # Context manager guarantees the CSV is closed even if a run raises.
    with open(f"{benchmark_dir}/StochasticExperiments.csv", "w") as text_file:
        uniform_parameters = True
        for file in files:
            exp_name = file.split('.')[0]
            if not uniform_parameters:
                # Fall back to the current values for unknown models.
                n_c, n_resample = per_model_parameters.get(exp_name, (n_c, n_resample))

            original_mdp = load_automaton_from_file(path_to_dir + file, automaton_type='mdp')
            input_alphabet = original_mdp.get_input_alphabet()
            mdp_sul = AutomatonSUL(original_mdp)

            # With reset_prob=0.25 the expected walk length is 4, so n_resample / 0.25
            # steps yield roughly n_resample walks. int() fixes the original
            # n_resample * (1 / 0.25), which passed a float where a step count
            # (an integer) is expected.
            eq_oracle = RandomWalkEqOracle(input_alphabet, mdp_sul, num_steps=int(n_resample / 0.25),
                                           reset_after_cex=True, reset_prob=0.25)

            # --- Active learning of an MDP model ---
            learned_mdp, data_mdp = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='mdp',
                                                         n_c=n_c, n_resample=n_resample, min_rounds=min_rounds,
                                                         strategy=strategy,
                                                         max_rounds=max_rounds, return_data=True,
                                                         samples_cex_strategy="bfs")

            # The same SUL and oracle are reused for the SMM run; the step/query
            # counters are zeroed so data_smm reflects only the second run.
            mdp_sul.num_steps = 0
            mdp_sul.num_queries = 0

            learned_smm, data_smm = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='smm',
                                                         n_c=n_c, n_resample=n_resample, min_rounds=min_rounds,
                                                         strategy=strategy,
                                                         max_rounds=max_rounds, return_data=True,
                                                         samples_cex_strategy="bfs")

            # Convert the SMM to an MDP so both models can be checked against
            # the same PRISM properties.
            smm_2_mdp = smm_to_mdp_conversion(learned_smm)

            mdp_results, mdp_err = model_check_experiment(get_properties_file(exp_name),
                                                          get_correct_prop_values(exp_name), learned_mdp)
            smm_results, smm_err = model_check_experiment(get_properties_file(exp_name),
                                                          get_correct_prop_values(exp_name), smm_2_mdp)

            properties_string_header = ",".join([f'{key}_val,{key}_err' for key in mdp_results.keys()])
            property_string_mdp = ",".join([f'{str(mdp_results[p])},{str(mdp_err[p])}' for p in mdp_results.keys()])
            property_string_smm = ",".join([f'{str(smm_results[p])},{str(smm_err[p])}' for p in smm_results.keys()])

            # Header row is (re)written per model, mirroring the original CSV layout.
            text_file.write('Exp_Name, n_c, n_resample, Final Hypothesis Size, Learning time,'
                            'Eq. Query Time, Learning Rounds, #MQ Learning, # Steps Learning,'
                            f'# MQ Eq.Queries, # Steps Eq.Queries , {properties_string_header}\n')
            text_file.write(f'learned_mdp_{exp_name},{n_c},{n_resample}, {data_mdp["automaton_size"]}, '
                            f'{data_mdp["learning_time"]}, {data_mdp["eq_oracle_time"]}, '
                            f'{data_mdp["learning_rounds"]}, {data_mdp["queries_learning"]}, {data_mdp["steps_learning"]},'
                            f'{data_mdp["queries_eq_oracle"]}, {data_mdp["steps_eq_oracle"]},'
                            f'{property_string_mdp}\n')
            text_file.write(f'learned_smm_{exp_name},{n_c},{n_resample}, {data_smm["automaton_size"]}, '
                            f'{data_smm["learning_time"]}, {data_smm["eq_oracle_time"]}, '
                            f'{data_smm["learning_rounds"]}, {data_smm["queries_learning"]}, {data_smm["steps_learning"]},'
                            f'{data_smm["queries_eq_oracle"]}, {data_smm["steps_eq_oracle"]},'
                            f'{property_string_smm}\n')
            text_file.flush()
================================================
FILE: Benchmarking/StopWithErorrRate.py
================================================
# Benchmarking/StopWithErorrRate.py
# Compares counterexample-processing strategies for stochastic L*.
# The learning experiments themselves are commented out below; the active part
# of this script only loads results pickled by a previous run and prints, per
# model and per strategy, the mean/min/max number of queries.
import pickle
import random
import time
from collections import defaultdict
from statistics import mean

import aalpy.paths
from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_stochastic_Lstar, run_Alergia
from aalpy.oracles.RandomWordEqOracle import RandomWordEqOracle
from aalpy.utils import load_automaton_from_file, get_properties_file, get_correct_prop_values, model_check_properties
from aalpy.utils import model_check_experiment
from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion

# Location of the ground-truth MDP models.
path_to_dir = '../DotModels/MDPs/'
files = ['slot_machine.dot', 'bluetooth.dot'] #
# NOTE(review): the list above is immediately overwritten by the full set below.
files = ['first_grid.dot', 'second_grid.dot', 'tcp.dot', 'mqtt.dot', 'bluetooth.dot', 'slot_machine.dot']
prop_folder = 'prism_eval_props/'

# Paths required by the PRISM model checker (Windows installation assumed).
aalpy.paths.path_to_prism = "C:/Program Files/prism-4.6/bin/prism.bat"
aalpy.paths.path_to_properties = "prism_eval_props/"

# Map: model name (file name without extension) -> loaded MDP.
model_dict = {m.split('.')[0]: load_automaton_from_file(path_to_dir + m, automaton_type='mdp') for m in files}

model_type = ['smm']
cex_processing = [None, 'longest_prefix', 'rs']
# model_type.reverse()

# Map: experiment name -> list of (cex_processing strategy, total query count).
res = defaultdict(list)

# The experiment loop below produced 'cex_processing_res.pickle'; it is kept
# commented out so the script can re-analyze existing results without re-running.
# for file in files:
#     for mt in model_type:
#         for cp in cex_processing:
#             for _ in range(4):
#
#                 exp_name = file.split('.')[0]
#
#                 print('--------------------------------------------------')
#                 print('Experiment:', exp_name, cp)
#
#                 original_mdp = model_dict[exp_name]
#                 input_alphabet = original_mdp.get_input_alphabet()
#
#                 mdp_sul = AutomatonSUL(original_mdp)
#
#                 eq_oracle = RandomWordEqOracle(input_alphabet, mdp_sul, num_walks=500, min_walk_len=5,
#                                                max_walk_len=15, reset_after_cex=True)
#
#                 pbs = ((get_properties_file(exp_name),
#                         get_correct_prop_values(exp_name), 0.02 if exp_name != 'bluetooth' else 0.03))
#                 learned_classic_mdp, data_mdp = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type=mt,
#                                                                      min_rounds=10,
#                                                                      #property_based_stopping=pbs,
#                                                                      cex_processing=cp,
#                                                                      samples_cex_strategy=None,
#                                                                      return_data=True, target_unambiguity=0.98,
#                                                                      print_level=1)
#
#                 res[exp_name].append((cp, data_mdp['queries_learning'] + data_mdp['queries_eq_oracle']))
#                 with open('cex_processing_res.pickle', 'wb') as handle:
#                     pickle.dump(res, handle, protocol=pickle.HIGHEST_PROTOCOL)

# Load previously computed results and print aggregate statistics.
with open('cex_processing_res.pickle', 'rb') as handle:
    res = pickle.load(handle)

for key, val in res.items():
    print(key)
    # Group query counts by counterexample-processing strategy.
    sorted_by_cp = defaultdict(list)
    for cp, data in val:
        sorted_by_cp[cp].append(data)
    for cp_method, data in sorted_by_cp.items():
        print(cp_method)
        print(mean(data), min(data), max(data))
================================================
FILE: Benchmarking/benchmark.py
================================================
"""Measure L* runtime over a folder of pre-generated DFA benchmark models.

Each of the first ten models in the selected folder is wrapped in a SUL and
learned with L* (state-prefix equivalence oracle); the reported total runtimes
and their mean are printed at the end.
"""
import os
from statistics import mean

from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_Lstar
from aalpy.oracles import StatePrefixEqOracle
from aalpy.utils import load_automaton_from_file

# Available benchmark folders; point `exp` at the one to measure.
dfa_1000_states_20_inputs = '../DotModels/DFA_1000_states_20_inp'
dfa_2000_states_10_inputs = '../DotModels/DFA_2000_states_10_inp'
moore_1000_states_20_inputs = '../DotModels/Moore_1000_states_20_inp_out'
moore_2000_states_10_inputs = '../DotModels/Moore_2000_states_10_inp_out'

run_times = []

# change on which folder to perform experiments
exp = dfa_2000_states_10_inputs

# Only the first ten models of the folder are benchmarked.
benchmarks = os.listdir(exp)[:10]

# Candidate L* configuration options (kept for reference).
caching_opt = [True, False]
closing_options = ['shortest_first', 'longest_first', 'single']
suffix_processing = ['all', 'single']
counter_example_processing = ['rs', 'longest_prefix', None]
e_closedness = ['prefix', 'suffix']

for b in benchmarks:
    target_dfa = load_automaton_from_file(f'{exp}/{b}', automaton_type='dfa')
    alphabet = target_dfa.get_input_alphabet()
    dfa_sul = AutomatonSUL(target_dfa)
    oracle = StatePrefixEqOracle(alphabet, dfa_sul, walks_per_state=5, walk_len=25)

    _, stats = run_Lstar(alphabet, dfa_sul, oracle, automaton_type='dfa',
                         cache_and_non_det_check=False, cex_processing='rs',
                         return_data=True, print_level=0)
    run_times.append(stats['total_time'])

print(run_times)
print(mean(run_times))
================================================
FILE: Benchmarking/benchmark_alphabet_increase.py
================================================
# Benchmark L* learning time as the input-alphabet size grows.
# For each of `num_increases` rounds, `repeat` random DFA/Mealy/Moore machines
# are generated with the current alphabet size and learned with L*; mean
# learning times per round are written to 'increasing_alphabet_experiments.csv'.
from statistics import mean
import csv

from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_Lstar
from aalpy.oracles import RandomWalkEqOracle
from aalpy.utils import generate_random_dfa, generate_random_mealy_machine, generate_random_moore_machine

num_states = 1000
alph_size = 5
repeat = 10
num_increases = 20

# CSV rows: first element is the row label, then one value per round.
# NOTE(review): `states` ends up with one more numeric entry than the time rows
# (the size for the never-run next round is appended last); the plotting script
# appears to compensate by dropping the final column — verify before changing.
states = ['alph_size', alph_size]
times_dfa = ['dfa_pypy_rs']
times_mealy = ['mealy_pypy_rs']
times_moore = ['moore_pypyrs']

cex_processing = 'rs'

for i in range(num_increases):
    print(i)
    total_time_dfa = []
    total_time_mealy = []
    total_time_moore = []
    for _ in range(repeat):
        alphabet = list(range(alph_size))

        # DFA: generate, wrap in SUL, learn, record learning time.
        dfa = generate_random_dfa(num_states, alphabet=alphabet, num_accepting_states=num_states // 2)
        sul = AutomatonSUL(dfa)
        # eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=5, walk_len=40)
        eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=10000, reset_prob=0.09)
        _, data = run_Lstar(alphabet, sul, eq_oracle, cex_processing=cex_processing, cache_and_non_det_check=False,
                            return_data=True, automaton_type='dfa')
        total_time_dfa.append(data['learning_time'])
        # Release the large objects before generating the next model.
        del dfa
        del sul
        del eq_oracle

        # Mealy machine: same procedure.
        mealy = generate_random_mealy_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet)
        sul_mealy = AutomatonSUL(mealy)
        # eq_oracle = StatePrefixEqOracle(alphabet, sul_mealy, walks_per_state=5, walk_len=40)
        eq_oracle = RandomWalkEqOracle(alphabet, sul_mealy, num_steps=10000, reset_prob=0.09)
        _, data = run_Lstar(alphabet, sul_mealy, eq_oracle, cex_processing=cex_processing,
                            cache_and_non_det_check=False,
                            return_data=True, automaton_type='mealy')
        total_time_mealy.append(data['learning_time'])
        del mealy
        del sul_mealy
        del eq_oracle

        # Moore machine: same procedure.
        moore = generate_random_moore_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet)
        moore_sul = AutomatonSUL(moore)
        # eq_oracle = StatePrefixEqOracle(alphabet, moore_sul, walks_per_state=5, walk_len=40)
        eq_oracle = RandomWalkEqOracle(alphabet, moore_sul, num_steps=10000, reset_prob=0.09)
        _, data = run_Lstar(alphabet, moore_sul, eq_oracle, cex_processing=cex_processing,
                            cache_and_non_det_check=False,
                            return_data=True, automaton_type='moore')
        total_time_moore.append(data['learning_time'])

    alph_size += 5
    states.append(alph_size)
    # save data and keep averages
    times_dfa.append(round(mean(total_time_dfa), 4))
    times_mealy.append(round(mean(total_time_mealy), 4))
    times_moore.append(round(mean(total_time_moore), 4))

with open('increasing_alphabet_experiments.csv', 'w') as f:
    wr = csv.writer(f, dialect='excel')
    wr.writerow(states)
    wr.writerow(times_dfa)
    wr.writerow(times_mealy)
    wr.writerow(times_moore)
================================================
FILE: Benchmarking/benchmark_size_increase.py
================================================
"""Benchmark L* runtime as the automaton size grows.

Starting from 100 states and increasing by 100 for `num_increases` rounds,
`repeat` random DFA/Mealy/Moore machines are generated per round and learned
with L* (random-walk equivalence oracle). Mean learning times and mean total
times per round are written to 'increasing_size_experiments.csv'.
"""
from statistics import mean
import csv

from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_Lstar
from aalpy.oracles import RandomWalkEqOracle
from aalpy.utils import generate_random_dfa, generate_random_mealy_machine, generate_random_moore_machine

num_states = 100
alphabet = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
repeat = 15
num_increases = 50

# CSV rows: first element is the row label, then one value per round.
states = ['num_states']
times_dfa = ['dfa_pypy_rs']
times_mealy = ['mealy_pypy_rs']
times_moore = ['moore_pypyrs']
total_dfa = ['dfa_total']
total_mealy = ['mealy_total']
total_moore = ['moore_total']

cex_processing = 'rs'

for i in range(num_increases):
    print(i)
    learning_time_dfa = []
    learning_time_mealy = []
    learning_time_moore = []
    total_time_dfa = []
    total_time_mealy = []
    total_time_moore = []
    states.append(num_states)
    for _ in range(repeat):
        # DFA: generate, wrap in SUL, learn, record learning and total time.
        dfa = generate_random_dfa(num_states, alphabet=alphabet, num_accepting_states=num_states // 2)
        sul = AutomatonSUL(dfa)
        # eq_oracle = StatePrefixEqOracle(alphabet, sul, walks_per_state=5, walk_len=40)
        eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=9000, reset_prob=0.09)
        _, data = run_Lstar(alphabet, sul, eq_oracle, cex_processing=cex_processing, cache_and_non_det_check=False,
                            return_data=True, automaton_type='dfa')
        learning_time_dfa.append(data['learning_time'])
        total_time_dfa.append(data['total_time'])
        # Release large objects before generating the next model.
        del sul
        del eq_oracle
        del dfa

        # Mealy machine: same procedure.
        mealy = generate_random_mealy_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet)
        sul_mealy = AutomatonSUL(mealy)
        # eq_oracle = StatePrefixEqOracle(alphabet, sul_mealy, walks_per_state=5, walk_len=40)
        eq_oracle = RandomWalkEqOracle(alphabet, sul_mealy, num_steps=9000, reset_prob=0.09)
        _, data = run_Lstar(alphabet, sul_mealy, eq_oracle, cex_processing=cex_processing,
                            cache_and_non_det_check=False,
                            return_data=True, automaton_type='mealy')
        learning_time_mealy.append(data['learning_time'])
        total_time_mealy.append(data['total_time'])
        del mealy
        del sul_mealy
        del eq_oracle

        # Moore machine: same procedure.
        moore = generate_random_moore_machine(num_states, input_alphabet=alphabet, output_alphabet=alphabet)
        moore_sul = AutomatonSUL(moore)
        # eq_oracle = StatePrefixEqOracle(alphabet, moore_sul, walks_per_state=5, walk_len=40)
        eq_oracle = RandomWalkEqOracle(alphabet, moore_sul, num_steps=9000, reset_prob=0.09)
        _, data = run_Lstar(alphabet, moore_sul, eq_oracle, cex_processing=cex_processing,
                            cache_and_non_det_check=False,
                            return_data=True, automaton_type='moore')
        learning_time_moore.append(data['learning_time'])
        total_time_moore.append(data['total_time'])

    # save data and keep averages
    times_dfa.append(round(mean(learning_time_dfa), 4))
    times_mealy.append(round(mean(learning_time_mealy), 4))
    times_moore.append(round(mean(learning_time_moore), 4))
    total_dfa.append(round(mean(total_time_dfa), 4))
    total_mealy.append(round(mean(total_time_mealy), 4))
    total_moore.append(round(mean(total_time_moore), 4))
    num_states += 100

# newline='' prevents csv from emitting blank lines on Windows (matches the
# csv usage in the sibling analysis scripts).
with open('increasing_size_experiments.csv', 'w', newline='') as f:
    wr = csv.writer(f, dialect='excel')
    wr.writerow(states)
    wr.writerow(times_dfa)
    wr.writerow(times_mealy)
    wr.writerow(times_moore)
    wr.writerow(total_dfa)
    wr.writerow(total_mealy)
    # BUG FIX: the original wrote `times_moore` here a second time, so the
    # Moore total-time row was computed but never written to the CSV.
    wr.writerow(total_moore)
================================================
FILE: Benchmarking/cex_processing_benchmark.py
================================================
# Compare counterexample-processing strategies for L* and KV on random
# deterministic automata. For each algorithm, the number of learning steps is
# collected per strategy and summary statistics are printed.
from collections import defaultdict
from statistics import mean, stdev

from aalpy.learning_algs import run_KV, run_Lstar
from aalpy.SULs import AutomatonSUL
from aalpy.oracles import RandomWalkEqOracle
from aalpy.utils import generate_random_deterministic_automata, bisimilar

counterexample_processing_strategy = ['rs', 'linear_fwd', 'linear_bwd', 'exponential_fwd', 'exponential_bwd']
algorithms = ['l_star', 'kv']
model_sizes = [500]
model_type = ['mealy', 'moore']
# alphabet_sizes = [(3,2), (3, 5), (3, 10), (5, 2), (5, 5), (5, 20)]
alphabet_sizes = [(5, 3)]
num_repetitions = 5

for learning_alg in algorithms:
    # steps_learning samples, grouped by cex-processing strategy.
    results = defaultdict(list)
    for model in model_type:
        for model_size in model_sizes:
            for input_size, output_size in alphabet_sizes:
                for cex_processing in counterexample_processing_strategy:
                    for _ in range(num_repetitions):
                        random_model = generate_random_deterministic_automata(model, num_states=model_size,
                                                                              input_alphabet_size=input_size,
                                                                              output_alphabet_size=output_size)
                        sul = AutomatonSUL(random_model)
                        input_al = random_model.get_input_alphabet()
                        eq_oracle = RandomWalkEqOracle(input_al, sul, num_steps=20000, reset_prob=0.09)

                        if learning_alg == 'kv':
                            learned_model, info = run_KV(input_al, sul, eq_oracle,
                                                         automaton_type=model, cex_processing=cex_processing,
                                                         return_data=True, print_level=0)
                        else:
                            learned_model, info = run_Lstar(input_al, sul, eq_oracle,
                                                            automaton_type=model, cex_processing=cex_processing,
                                                            return_data=True, print_level=0)

                        results[cex_processing].append(info['steps_learning'])

                        # Sanity check: the learned model must be bisimilar to the target.
                        if not bisimilar(learned_model, random_model):
                            print(learning_alg, cex_processing, 'mismatch')

    # Per-strategy statistics for the current algorithm.
    print(learning_alg)
    for k, v in results.items():
        print(k, mean(v), stdev(v), min(v), max(v))
================================================
FILE: Benchmarking/compare_lstar_and_kv.py
================================================
"""Head-to-head step-efficiency comparison of L* and KV.

Random deterministic automata of varying type/size/alphabet are generated;
each is learned independently by L* and by KV with identical random-word
equivalence oracles, and the relative step efficiency is printed.
"""
from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_Lstar, run_KV
from aalpy.oracles import RandomWordEqOracle
from aalpy.utils import generate_random_deterministic_automata

automata_type = ['dfa', 'mealy', 'moore']
automata_size = [10, 100, 500, 1000,]
input_sizes = [2, 3]
output_sizes = [2, 3, 5, 10]

test_models = []

for model_type in automata_type:
    for size in automata_size:
        for i in input_sizes:
            for o in output_sizes:
                target = generate_random_deterministic_automata(model_type, size, i, o, num_accepting_states=size//8)
                alphabet = target.get_input_alphabet()

                print('------------------------------------------')
                if model_type == 'dfa':
                    print(f'Type: {model_type}, size: {size}, # inputs: {i}, # accepting: {size//8}')
                else:
                    print(f'Type: {model_type}, size: {size}, # inputs: {i}, # outputs: {o}')

                # Run L* on a fresh SUL/oracle pair.
                lstar_sul = AutomatonSUL(target)
                lstar_oracle = RandomWordEqOracle(alphabet, lstar_sul, num_walks=5000, min_walk_len=10, max_walk_len=40)
                l_star_model, l_star_info = run_Lstar(alphabet, lstar_sul, lstar_oracle, model_type, print_level=0, return_data=True)
                l_star_steps = l_star_info['steps_learning']
                l_star_queries = l_star_info['queries_learning']

                # Run KV on its own fresh SUL/oracle pair.
                kv_sul = AutomatonSUL(target)
                kv_oracle = RandomWordEqOracle(alphabet, kv_sul, num_walks=5000, min_walk_len=10, max_walk_len=40)
                kv_model, kv_info = run_KV(alphabet, kv_sul, kv_oracle, model_type, print_level=0, return_data=True)
                kv_steps = kv_info['steps_learning']
                kv_queries = kv_info['queries_learning']

                # A size mismatch means the random oracle missed a counterexample.
                if l_star_model.size != target.size:
                    print('L* did not learn correctly.')
                if kv_model.size != target.size:
                    print('KV did not learn correctly.')

                print(f'L* steps: {l_star_steps}')
                print(f'KV steps: {kv_steps}')

                if kv_steps < l_star_steps:
                    print(f'KV is {round((l_star_steps / kv_steps) * 100 - 100, 2)}% more step efficient')
                else:
                    print(f'L* is {round((kv_steps / l_star_steps) * 100 - 100, 2)}% more step efficient')
================================================
FILE: Benchmarking/error_benchmark_statistics.py
================================================
# Aggregate per-experiment statistics from the error-based-stopping benchmark
# CSVs and compare MDP vs SMM learning (min/max/mean per metric). Results are
# printed and also written to 'error_benchmark.csv'.
import csv
import os
from collections import defaultdict
from statistics import mean

directory = 'FM_mdp_smm_error_based_stop/benchmark_no_cq_bfs_longest_prefix/'
# Drop the last directory entry and a known-bad experiment file.
benchmarks = os.listdir(directory)[:-1]
benchmarks.remove('exp_14.csv')

# values: experiment name -> {column header -> list of rounded float samples}.
values = dict()
for file in benchmarks:
    with open(directory + file, 'r') as f:
        reader = csv.reader(f)
        data = list(reader)
        # Each experiment occupies 3 consecutive rows: header, mdp row, smm row.
        for i in range(0, len(data), 3):
            header = data[i]
            mdp, smm = data[i + 1], data[i + 2]
            for formalism in [mdp, smm]:
                # NOTE(review): this inner loop reuses the name `i`, shadowing
                # the 3-row block index above; harmless in Python (the outer
                # `for` rebinds `i` from its own range each iteration) but
                # confusing to read.
                for i, val in enumerate(formalism[1:]):
                    if formalism[0] not in values.keys():
                        values[formalism[0]] = defaultdict(list)
                    values[formalism[0]][header[i + 1]].append(round(float(val), 2))

# Per-experiment min/max/mean per metric, keyed by '<exp_name>_<mdp|smm>'.
min_values_dict = dict()
max_values_dict = dict()
avr_values_dict = dict()
for exp in values:
    # exp[12:] strips the 'learned_mdp_'/'learned_smm_' prefix from the row label.
    exp_name = exp[12:]
    formalism = 'smm' if 'smm' in exp else 'mdp'
    name = f'{exp_name}_{formalism}'
    min_values_dict[name] = dict()
    max_values_dict[name] = dict()
    avr_values_dict[name] = dict()
    for category, value in values[exp].items():
        min_values_dict[name][category] = min(value)
        max_values_dict[name][category] = max(value)
        avr_values_dict[name][category] = round(mean(value), 2)

# NOTE(review): leading spaces in these names match the raw CSV headers.
interesting_fields = [' Learning time', ' Learning Rounds', ' #MQ Learning', ' # Steps Learning']

print('ALL ERRORS ARE LESS THAN 2%. THAT WAS USED AS STOPPING CRITERION')

# Experiments come in (mdp, smm) pairs; compare each pair field by field.
experiments = list(min_values_dict.keys())
for e_index in range(0, len(experiments), 2):
    for i in interesting_fields:
        print(f'{experiments[e_index]} vs {experiments[e_index + 1]} = > {i}')
        min_eff = round(min_values_dict[experiments[e_index]][i] / min_values_dict[experiments[e_index + 1]][i]*100 , 2)
        print(f'Min : {min_values_dict[experiments[e_index]][i]} vs {min_values_dict[experiments[e_index + 1]][i]} | SMM efficiency : {min_eff}')
        max_eff = round(max_values_dict[experiments[e_index]][i] / max_values_dict[experiments[e_index + 1]][i]*100 , 2)
        print(f'Max : {max_values_dict[experiments[e_index]][i]} vs {max_values_dict[experiments[e_index + 1]][i]} | SMM efficiency : {max_eff}')
        avr_eff = round(avr_values_dict[experiments[e_index]][i] / avr_values_dict[experiments[e_index + 1]][i]*100 , 2)
        print(f'Avr : {avr_values_dict[experiments[e_index]][i]} vs {avr_values_dict[experiments[e_index + 1]][i]}| SMM efficiency : {avr_eff}')
    print('-------------------------------------------------')

# Write the same comparison to CSV.
with open('error_benchmark.csv', 'w',newline='') as file:
    writer = csv.writer(file)
    experiments = list(min_values_dict.keys())
    for e_index in range(0, len(experiments), 2):
        writer.writerow([experiments[e_index][:-4], 'mdp', 'smm', 'smm compared to mdp efficiency %'])
        for i in interesting_fields:
            # NOTE(review): leftover debug print duplicating the loop above.
            print(f'{experiments[e_index]} vs {experiments[e_index + 1]} = > {i}')
            min_eff = round(min_values_dict[experiments[e_index]][i] / min_values_dict[experiments[e_index + 1]][i]*100 , 2)
            writer.writerow([i + '_min', min_values_dict[experiments[e_index]][i], min_values_dict[experiments[e_index + 1]][i], min_eff])
            max_eff = round(max_values_dict[experiments[e_index]][i] / max_values_dict[experiments[e_index + 1]][i]*100 , 2)
            writer.writerow([i + '_max', max_values_dict[experiments[e_index]][i], max_values_dict[experiments[e_index + 1]][i], max_eff])
            avr_eff = round(avr_values_dict[experiments[e_index]][i] / avr_values_dict[experiments[e_index + 1]][i]*100 , 2)
            writer.writerow([i + '_avr', avr_values_dict[experiments[e_index]][i], avr_values_dict[experiments[e_index + 1]][i], avr_eff])
        writer.writerow([])
        print('-------------------------------------------------')
================================================
FILE: Benchmarking/evaluate_l_star_configurations.py
================================================
# Grid-search over L* configuration options (closing strategy, suffix
# closedness, cex processing, prefixes-in-cell) on random DFAs. Collects query
# and step counts per configuration, pickles them, and prints them sorted by
# mean number of steps.
import pickle
from collections import defaultdict
from random import seed
from statistics import mean

from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_Lstar
from aalpy.oracles import StatePrefixEqOracle, RandomWMethodEqOracle, RandomWalkEqOracle, RandomWordEqOracle
from aalpy.utils import generate_random_deterministic_automata

#closing_strategies = ['shortest_first']
closing_strategies = ['shortest_first', 'longest_first', 'single', 'single_longest']
obs_table_cell_prefixes = [True, False]
suffix_closed = [True, False]
cex_processing = [None, 'longest_prefix', 'rs']

automata_size = [400 ]
input_sizes = [2,]
output_sizes = [3, ]
num_repeats = 10

# Generate the set of random target DFAs once, shared by all configurations.
test_models = []
for size in automata_size:
    for i in input_sizes:
        for o in output_sizes:
            random_model = generate_random_deterministic_automata('dfa', size, i, o, num_accepting_states=30)
            test_models.append(random_model)

# tc counts completed runs; num_exp is the total for progress reporting.
tc = 0
num_exp = len(test_models) * len(closing_strategies) * len(suffix_closed) * num_repeats * len(cex_processing) * len(obs_table_cell_prefixes)

# stats: config-name string -> list of (queries, steps, learned-correct-size) tuples.
stats = defaultdict(list)
for test_model in test_models:
    input_al = test_model.get_input_alphabet()
    for closedness_type in suffix_closed:
        for closing_strategy in closing_strategies:
            for cex in cex_processing:
                for prefix_in_cell in obs_table_cell_prefixes:
                    for _ in range(num_repeats):
                        tc += 1
                        # Progress in percent.
                        print(round(tc / num_exp * 100, 2))
                        # seed(tc)
                        sul = AutomatonSUL(test_model)
                        eq_oracle = RandomWordEqOracle(input_al, sul, num_walks=5000, min_walk_len=10, max_walk_len=40)
                        model, info = run_Lstar(input_al, sul, eq_oracle, 'dfa',
                                                closing_strategy=closing_strategy,
                                                cex_processing=cex,
                                                e_set_suffix_closed=closedness_type,
                                                all_prefixes_in_obs_table=prefix_in_cell,
                                                print_level=0,
                                                return_data=True)

                        config_name = f'suffix_closed:{closedness_type},closing_strategy:{closing_strategy},' \
                                      f'cex:{cex},prefixes_in_cell_{prefix_in_cell}'

                        stats[config_name].append(
                            (info['queries_learning'],
                             info['steps_learning'],
                             model.size == test_model.size))

with open('stats.pickle', 'wb') as handle:
    pickle.dump(stats, handle, protocol=pickle.HIGHEST_PROTOCOL)

# with open('stats.pickle', 'rb') as handle:
#     stats = pickle.load(handle)

# Aggregate per configuration and sort by mean number of steps (tuple index 2).
statistics_sorted = []
for k, v in stats.items():
    mean_queries, mean_steps, num_correct = mean([x[0] for x in v]), mean([x[1] for x in v]), sum([x[2] for x in v])
    statistics_sorted.append((k, mean_queries, mean_steps, num_correct))

statistics_sorted.sort(key=lambda x: x[2])

for k, q, s, c in statistics_sorted:
    print(k, int(q), int(s), c)

# Results of a previous run (sorted by mean steps, ascending):
# suffix_closed:True,closing_strategy:longest_first,cex:rs,prefixes_in_cell_True 6702 122491 10
# suffix_closed:False,closing_strategy:single,cex:rs,prefixes_in_cell_True 8339 150049 10
# suffix_closed:False,closing_strategy:longest_first,cex:rs,prefixes_in_cell_True 8087 154159 10
# suffix_closed:True,closing_strategy:single,cex:rs,prefixes_in_cell_True 8711 159552 10
# suffix_closed:False,closing_strategy:shortest_first,cex:longest_prefix,prefixes_in_cell_True 9032 162634 10
# suffix_closed:True,closing_strategy:shortest_first,cex:rs,prefixes_in_cell_True 9024 163924 10
# suffix_closed:False,closing_strategy:shortest_first,cex:rs,prefixes_in_cell_True 8685 164049 10
# suffix_closed:False,closing_strategy:longest_first,cex:longest_prefix,prefixes_in_cell_True 9011 167036 10
# suffix_closed:False,closing_strategy:single,cex:longest_prefix,prefixes_in_cell_True 9099 176007 10
# suffix_closed:True,closing_strategy:single,cex:longest_prefix,prefixes_in_cell_True 9291 178341 10
# suffix_closed:True,closing_strategy:shortest_first,cex:longest_prefix,prefixes_in_cell_True 9317 179006 10
# suffix_closed:False,closing_strategy:shortest_first,cex:None,prefixes_in_cell_True 10491 193517 10
# suffix_closed:True,closing_strategy:single,cex:None,prefixes_in_cell_True 11139 213554 10
# suffix_closed:True,closing_strategy:longest_first,cex:longest_prefix,prefixes_in_cell_True 10865 214022 10
# suffix_closed:True,closing_strategy:shortest_first,cex:None,prefixes_in_cell_True 12039 221108 10
# suffix_closed:False,closing_strategy:longest_first,cex:None,prefixes_in_cell_True 11107 222398 10
# suffix_closed:False,closing_strategy:single_longest,cex:rs,prefixes_in_cell_True 8998 227650 10
# suffix_closed:True,closing_strategy:longest_first,cex:None,prefixes_in_cell_True 12042 230262 10
# suffix_closed:False,closing_strategy:single,cex:None,prefixes_in_cell_True 12176 239711 10
# suffix_closed:True,closing_strategy:shortest_first,cex:rs,prefixes_in_cell_False 15891 319699 10
# suffix_closed:True,closing_strategy:single_longest,cex:None,prefixes_in_cell_True 10758 321832 10
# suffix_closed:False,closing_strategy:single_longest,cex:None,prefixes_in_cell_True 11331 330290 10
# suffix_closed:True,closing_strategy:single,cex:rs,prefixes_in_cell_False 16014 331618 10
# suffix_closed:False,closing_strategy:shortest_first,cex:rs,prefixes_in_cell_False 15315 335046 10
# suffix_closed:False,closing_strategy:single_longest,cex:rs,prefixes_in_cell_False 15289 339521 10
# suffix_closed:True,closing_strategy:longest_first,cex:rs,prefixes_in_cell_False 16933 343434 10
# suffix_closed:False,closing_strategy:single,cex:rs,prefixes_in_cell_False 16561 356579 10
# suffix_closed:False,closing_strategy:longest_first,cex:rs,prefixes_in_cell_False 17112 367300 10
# suffix_closed:False,closing_strategy:longest_first,cex:None,prefixes_in_cell_False 21305 421926 10
# suffix_closed:True,closing_strategy:single,cex:None,prefixes_in_cell_False 21228 439700 10
# suffix_closed:False,closing_strategy:single,cex:longest_prefix,prefixes_in_cell_False 21119 446616 10
# suffix_closed:True,closing_strategy:longest_first,cex:longest_prefix,prefixes_in_cell_False 20241 449658 10
# suffix_closed:False,closing_strategy:single,cex:None,prefixes_in_cell_False 22933 454032 10
# suffix_closed:False,closing_strategy:shortest_first,cex:longest_prefix,prefixes_in_cell_False 20601 456783 10
# suffix_closed:True,closing_strategy:longest_first,cex:None,prefixes_in_cell_False 21736 457516 10
# suffix_closed:False,closing_strategy:shortest_first,cex:None,prefixes_in_cell_False 22398 469775 10
# suffix_closed:True,closing_strategy:shortest_first,cex:longest_prefix,prefixes_in_cell_False 22365 477132 10
# suffix_closed:True,closing_strategy:single,cex:longest_prefix,prefixes_in_cell_False 21557 505135 10
# suffix_closed:True,closing_strategy:shortest_first,cex:None,prefixes_in_cell_False 24000 518014 10
# suffix_closed:False,closing_strategy:longest_first,cex:longest_prefix,prefixes_in_cell_False 23837 548645 10
# suffix_closed:False,closing_strategy:single_longest,cex:None,prefixes_in_cell_False 24994 583869 10
# suffix_closed:True,closing_strategy:single_longest,cex:None,prefixes_in_cell_False 24839 588329 10
# suffix_closed:False,closing_strategy:single_longest,cex:longest_prefix,prefixes_in_cell_True 10322 620607 10
# suffix_closed:True,closing_strategy:single_longest,cex:rs,prefixes_in_cell_True 9930 651615 10
# suffix_closed:True,closing_strategy:single_longest,cex:rs,prefixes_in_cell_False 16761 729912 10
# suffix_closed:True,closing_strategy:single_longest,cex:longest_prefix,prefixes_in_cell_True 11419 779192 10
# suffix_closed:True,closing_strategy:single_longest,cex:longest_prefix,prefixes_in_cell_False 19734 919089 10
# suffix_closed:False,closing_strategy:single_longest,cex:longest_prefix,prefixes_in_cell_False 19842 939866 10
================================================
FILE: Benchmarking/fm_benchmark.py
================================================
# Aggregate the FM MDP-vs-SMM benchmark CSVs: per experiment and metric compute
# min/max/mean, print the MDP/SMM comparison, and dump it to
# 'fm_statistics_2204.csv'.
import csv
import os
from collections import defaultdict
from statistics import mean

# Alternative result folders; uncomment the one to analyze.
# directory = 'FM_mdp_smm/benchmark_no_cq_bfs_longest_prefix/'
directory = 'FM_mdp_smm/benchmark_no_cq_merged_longest_prefix/'
#directory = 'FM_mdp_smm/benchmark_new_chi2/'
# directory = 'FM_mdp_smm/benchmark_no_cq_None_longest_prefix/'
# directory = 'FM_mdp_smm/benchmark_chi_square_None_longest_prefix/'

benchmarks = os.listdir(directory)

# values: experiment name -> {column header -> list of rounded float samples}.
values = dict()
for file in benchmarks:
    with open(directory + file, 'r') as f:
        reader = csv.reader(f)
        data = list(reader)
        # Each experiment occupies 3 consecutive rows: header, mdp row, smm row.
        for i in range(0, len(data), 3):
            header = data[i]
            mdp,smm = data[i+1], data[i + 2]
            for formalism in [mdp, smm]:
                # NOTE(review): reuses the name `i`, shadowing the block index
                # above; harmless here but easy to misread.
                for i, val in enumerate(formalism[1:]):
                    if formalism[0] not in values.keys():
                        values[formalism[0]] = defaultdict(list)
                    values[formalism[0]][header[i+1]].append(round(float(val), 2))

# Per-experiment min/max/mean per metric, keyed by '<exp_name>_<mdp|smm>'.
min_values_dict = dict()
max_values_dict = dict()
avr_values_dict = dict()
for exp in values:
    # exp[12:] strips the 'learned_mdp_'/'learned_smm_' prefix from the row label.
    exp_name = exp[12:]
    formalism = 'smm' if 'smm' in exp else 'mdp'
    name = f'{exp_name}_{formalism}'
    min_values_dict[name] = dict()
    max_values_dict[name] = dict()
    avr_values_dict[name] = dict()
    for category, value in values[exp].items():
        min_values_dict[name][category] = min(value)
        max_values_dict[name][category] = max(value)
        avr_values_dict[name][category] = round(mean(value), 2)

# Configure matplotlib's pgf backend for LaTeX output.
# NOTE(review): plt is imported and configured here but no plot is produced in
# the visible part of this script.
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("pgf")
matplotlib.rcParams.update({
    "pgf.texsystem": "pdflatex",
    'font.family': 'serif',
    'text.usetex': True,
    'pgf.rcfonts': False,
})

# TODO REMOVE WHITESPACES for new benchmarking results
interesting_fields = ['Learning time', 'Learning Rounds', '#MQ Learning', '# Steps Learning', 'prob1_val','prob1_err','prob2_val','prob2_err','prob3_val','prob3_err','prob4_val','prob4_err','prob5_val','prob5_err']

# Experiments come in (mdp, smm) pairs; compare each pair field by field,
# guarding against division by zero in the efficiency ratios.
experiments = list(min_values_dict.keys())
for e_index in range(0, len(experiments), 2):
    for i in interesting_fields:
        if i not in min_values_dict[experiments[e_index]].keys():
            continue
        print(f'{experiments[e_index]} vs {experiments[e_index + 1]} = > {i}')
        if min_values_dict[experiments[e_index + 1]][i] != 0:
            min_eff = round(min_values_dict[experiments[e_index]][i] / min_values_dict[experiments[e_index + 1]][i]*100 , 2)
        else:
            min_eff = 0
        print(f'Min : {min_values_dict[experiments[e_index]][i]} vs {min_values_dict[experiments[e_index + 1]][i]} | SMM efficiency : {min_eff}')
        if max_values_dict[experiments[e_index + 1]][i] != 0:
            max_eff = round(max_values_dict[experiments[e_index]][i] / max_values_dict[experiments[e_index + 1]][i]*100 , 2)
        else:
            max_eff = 0
        print(f'Max : {max_values_dict[experiments[e_index]][i]} vs {max_values_dict[experiments[e_index + 1]][i]} | SMM efficiency : {max_eff}')
        if avr_values_dict[experiments[e_index + 1]][i] != 0:
            avr_eff = round(avr_values_dict[experiments[e_index]][i] / avr_values_dict[experiments[e_index + 1]][i]*100 , 2)
        else:
            avr_eff = 0
        print(f'Avr : {avr_values_dict[experiments[e_index]][i]} vs {avr_values_dict[experiments[e_index + 1]][i]}| SMM efficiency : {avr_eff}')
    print('-------------------------------------------------')

# Write the same comparison to CSV.
with open('fm_statistics_2204.csv', 'w',newline='') as file:
    writer = csv.writer(file)
    experiments = list(min_values_dict.keys())
    for e_index in range(0, len(experiments), 2):
        writer.writerow([experiments[e_index][:-4], 'mdp', 'smm', 'smm compared to mdp efficiency %'])
        for i in interesting_fields:
            if i not in min_values_dict[experiments[e_index]].keys():
                continue
            if min_values_dict[experiments[e_index + 1]][i] != 0:
                min_eff = round(
                    min_values_dict[experiments[e_index]][i] / min_values_dict[experiments[e_index + 1]][i] * 100, 2)
            else:
                min_eff = 0
            writer.writerow([i + '_min', min_values_dict[experiments[e_index]][i], min_values_dict[experiments[e_index + 1]][i], min_eff])
            if max_values_dict[experiments[e_index + 1]][i] != 0:
                max_eff = round(
                    max_values_dict[experiments[e_index]][i] / max_values_dict[experiments[e_index + 1]][i] * 100, 2)
            else:
                max_eff = 0
            writer.writerow([i + '_max', max_values_dict[experiments[e_index]][i], max_values_dict[experiments[e_index + 1]][i], max_eff])
            if avr_values_dict[experiments[e_index + 1]][i] != 0:
                avr_eff = round(
                    avr_values_dict[experiments[e_index]][i] / avr_values_dict[experiments[e_index + 1]][i] * 100, 2)
            else:
                avr_eff = 0
            writer.writerow([i + '_avr', avr_values_dict[experiments[e_index]][i], avr_values_dict[experiments[e_index + 1]][i], avr_eff])
        writer.writerow([])
        print('-------------------------------------------------')
================================================
FILE: Benchmarking/fm_plots.py
================================================
def plot_error():
    """Render the error-based-stopping benchmark (MDP vs SMM) as grouped bars.

    Draws two side-by-side bar charts — learning time and number of membership
    queries — for four case studies, and saves them as 'error_bench.pgf' and
    'error_bench.tex'. Requires matplotlib with a LaTeX toolchain (pgf
    backend) and tikzplotlib; the data is hard-coded from a previous run.
    """
    import matplotlib.pyplot as plt
    import matplotlib
    matplotlib.use("pgf")
    matplotlib.rcParams.update({
        "pgf.texsystem": "pdflatex",
        'font.family': 'serif',
        'text.usetex': True,
        'pgf.rcfonts': False,
    })

    # MDP then SMM (one row each, one column per case study).
    learning_time_data = [
        [68.19, 140.31, 154.35, 116.8],
        [27.5, 98.31, 30.87, 68]
    ]
    num_mq_data = [
        [81803.23, 153758.15, 560705.92, 248552.62, ],
        [36937.54, 91309.08, 51791.54, 92607]
    ]

    import numpy as np
    N = 4
    ind = np.arange(N)  # the x locations for the groups
    width = 0.25  # the width of the bars

    fig, (ax_time, ax_mq) = plt.subplots(1, 2, figsize=(10, 3))

    # Left chart: learning time.
    ax_time.bar(ind, learning_time_data[0], width, label='MDP')
    ax_time.bar(ind + width, learning_time_data[1], width, label='SMM')
    ax_time.set_ylabel('Learning Time (s)')
    ax_time.set_xticks(ind + width / 2)
    ax_time.set_xticklabels(('35 State\nGridworld', '72 State\nGridworld', 'MQTT', 'TCP',))
    ax_time.grid(axis='y')
    ax_time.legend(loc='upper left')

    # Right chart: number of membership queries.
    ax_mq.bar(ind, num_mq_data[0], width, label='MDP')
    ax_mq.bar(ind + width, num_mq_data[1], width, label='SMM')
    # FIX: raw string so the LaTeX-escaped '\#' is not an invalid escape
    # sequence (same bytes at runtime, silences W605 / future SyntaxError).
    ax_mq.set_ylabel(r'\# Membership Queries')
    ax_mq.ticklabel_format(axis='y', style='sci', scilimits=(1, 4))
    ax_mq.set_xticks(ind + width / 2)
    ax_mq.set_xticklabels(('35 State\nGridworld', '72 State\nGridworld', 'MQTT', 'TCP',))
    ax_mq.legend(loc='upper left')
    ax_mq.grid(axis='y')

    fig.tight_layout()
    # plt.show()
    plt.savefig("error_bench.pgf", bbox_inches='tight')

    import tikzplotlib
    tikzplotlib.save("error_bench.tex")
def plot_benchmarks():
    """Render benchmark comparison (MDP vs SMM) as grouped bar charts.

    Intended to draw average cumulative error and number of membership queries
    for four case studies and save them as 'benchmarking.pgf'/'benchmarking.tex'.

    WARNING: `avr_cum_err` is still an empty TODO placeholder, so calling this
    function raises IndexError at the first `ax_time.bar(...)` until the data
    is filled in. Requires matplotlib (pgf backend) and tikzplotlib.
    """
    import matplotlib.pyplot as plt
    import matplotlib
    matplotlib.use("pgf")
    matplotlib.rcParams.update({
        "pgf.texsystem": "pdflatex",
        'font.family': 'serif',
        'text.usetex': True,
        'pgf.rcfonts': False,
    })

    # MDP then SMM (one row each, one column per case study).
    num_mq_data = [
        [81803.23, 153758.15, 560705.92, 248552.62, ],
        [36937.54, 91309.08, 51791.54, 92607]
    ]
    # TODO: fill in average cumulative error data (2 rows x 4 columns);
    # the function cannot run until this is populated.
    avr_cum_err = [
    ]

    import numpy as np
    N = 4
    ind = np.arange(N)  # the x locations for the groups
    width = 0.25  # the width of the bars

    fig, (ax_time, ax_mq) = plt.subplots(1, 2, figsize=(10, 3))

    # Left chart: average cumulative error (see TODO above).
    ax_time.bar(ind, avr_cum_err[0], width, label='MDP')
    ax_time.bar(ind + width, avr_cum_err[1], width, label='SMM')
    ax_time.set_ylabel('Learning Time (s)')
    ax_time.set_xticks(ind + width / 2)
    ax_time.set_xticklabels(('35 State\nGridworld', '72 State\nGridworld', 'MQTT', 'TCP',))
    ax_time.grid(axis='y')
    ax_time.legend(loc='upper left')

    # Right chart: number of membership queries.
    ax_mq.bar(ind, num_mq_data[0], width, label='MDP')
    ax_mq.bar(ind + width, num_mq_data[1], width, label='SMM')
    # FIX: raw string so the LaTeX-escaped '\#' is not an invalid escape
    # sequence (same bytes at runtime, silences W605 / future SyntaxError).
    ax_mq.set_ylabel(r'\# Membership Queries')
    ax_mq.ticklabel_format(axis='y', style='sci', scilimits=(1, 4))
    ax_mq.set_xticks(ind + width / 2)
    ax_mq.set_xticklabels(('35 State\nGridworld', '72 State\nGridworld', 'MQTT', 'TCP',))
    ax_mq.legend(loc='upper left')
    ax_mq.grid(axis='y')

    fig.tight_layout()
    # plt.show()
    plt.savefig("benchmarking.pgf", bbox_inches='tight')

    import tikzplotlib
    tikzplotlib.save("benchmarking.tex")
# Script entry point: only the error-rate plot is generated by default.
if __name__ == '__main__':
    plot_error()
================================================
FILE: Benchmarking/generate_plots.py
================================================
from random import random, randint
import matplotlib
from matplotlib import pyplot as plt
import csv
def plot_increasing_size_exp():
    """Plot total learning runtime against automaton size for DFA/Mealy/Moore.

    Reads 'increasing_size_experiments.csv' (header row = automaton sizes,
    rows 4+ = total runtimes per automaton type) and writes the figure to
    'state_increase_runtime.pgf'.
    """
    # Backend/TeX configuration must happen BEFORE any figure is created;
    # in the original code matplotlib.use("pgf") was called after plotting,
    # where it no longer affects the current figure.
    matplotlib.use("pgf")
    matplotlib.rcParams.update({
        "pgf.texsystem": "pdflatex",
        'font.family': 'serif',
        'text.usetex': True,
        'pgf.rcfonts': False,
    })
    data = []
    with open('increasing_size_experiments.csv', 'r') as f:
        reader = csv.reader(f)
        data = list(reader)
    # Header row: drop its row label, keep the automaton sizes as x values.
    x_axis = data.pop(0)
    x_axis.pop(0)
    # use total times only (skip the first three per-phase rows)
    data = data[3:]
    x_axis = [int(x) for x in x_axis]
    times = []
    labels = ['DFA', 'Mealy', 'Moore']
    for r in data:
        r.pop(0)  # discard the row label
        times.extend([float(i) for i in r])
        plt.plot(x_axis, [float(i) for i in r], label=labels.pop(0))
    plt.legend()
    plt.xticks([100, 1000, 2000, 3000, 4000, 5000, ])
    plt.yticks([min(times), 0.5, 1, 1.5, max(times)])
    plt.ylabel("Time (s)")
    plt.xlabel("Automaton Size")
    plt.grid()
    plt.savefig('state_increase_runtime.pgf')
    # plt.show()
def plot_increasing_alphabeth_exp():
    """Plot total learning runtime against alphabet size for DFA/Mealy/Moore.

    Reads 'increasing_alphabet_experiments.csv' (header row = alphabet sizes,
    last header column dropped) and writes the figure to
    'alphabet_increase_runtime.pgf'.
    """
    # Backend/TeX configuration must happen BEFORE any figure is created;
    # in the original code matplotlib.use("pgf") was called after plotting,
    # where it no longer affects the current figure.
    matplotlib.use("pgf")
    matplotlib.rcParams.update({
        "pgf.texsystem": "pdflatex",
        'font.family': 'serif',
        'text.usetex': True,
        'pgf.rcfonts': False,
    })
    data = []
    with open('increasing_alphabet_experiments.csv', 'r') as f:
        reader = csv.reader(f)
        data = list(reader)
    # Header row: drop a trailing column and its row label, keep x values.
    x_axis = data.pop(0)[:-1]
    x_axis.pop(0)
    x_axis = [int(x) for x in x_axis]
    times = []
    labels = ['DFA', 'Mealy', 'Moore']
    for r in data:
        r.pop(0)  # discard the row label
        times.extend([float(i) for i in r])
        plt.plot(x_axis, [float(i) for i in r], label=labels.pop(0))
    plt.legend()
    plt.xticks([5, 25, 50, 75, 100])
    plt.yticks([min(times), 1, 2.5, 4, max(times)])
    plt.ylabel("Time (s)")
    plt.xlabel("Alphabet Size")
    plt.grid()
    plt.savefig('alphabet_increase_runtime.pgf')
    # plt.show()
def plot_together():
    """Render alphabet-size (right) and automaton-size (left) runtime panels
    side by side and save them as 'both_images.pgf'.

    NOTE(review): `Axes.plot_side_by_side` is not a Matplotlib method -- every
    such call below raises AttributeError; the original per-series plot calls
    appear to have been lost and must be restored before use. This function is
    also shadowed by an identical `plot_together` defined later in this file.
    """
    fig, (ax2, ax1) = plt.subplots(1, 2, figsize=(10, 3))
    with open('increasing_alphabet_experiments.csv', 'r') as f:
        reader = csv.reader(f)
        data = list(reader)
    # Header row: drop a trailing column and the row label, keep x values.
    x_axis = data.pop(0)[:-1]
    x_axis.pop(0)
    # use total times only
    x_axis = [int(x) for x in x_axis]
    #x_axis = list(range(2,51))
    times = []
    labels = ['DFA', 'Mealy', 'Moore']
    for r in data:
        row_name = r.pop(0)
        times.extend([float(i) for i in r])
    # TODO(review): broken -- see note in docstring.
    ax1.plot_side_by_side()
    ax1.legend()
    ax1.set_xticks([5,25,50,75,100], minor=False)
    ax1.set_yticks([min(times), 1, 2.5, 4, max(times)], minor=False)
    #plt.yticks([min(times), 1, 2, max(times)])
    #plt.yticks([min(times), 3, 6, 10 , 14])
    ax1.set_ylabel("Time (s)")
    ax1.set_xlabel("Alphabet Size")
    #plt.grid(axis='y')
    ax1.grid()
    with open('increasing_size_experiments.csv', 'r') as f:
        reader = csv.reader(f)
        data = list(reader)
    x_axis = data.pop(0)
    x_axis.pop(0)
    # use total times only
    data = data[3:]
    x_axis = [int(x) for x in x_axis]
    #x_axis = list(range(2,51))
    times = []
    labels = ['DFA', 'Mealy', 'Moore']
    for r in data:
        row_name = r.pop(0)
        times.extend([float(i) for i in r])
    # TODO(review): broken -- see note in docstring.
    ax2.plot_side_by_side()
    ax2.legend()
    ax2.set_xticks([100, 1000, 2000, 3000, 4000, 5000,], minor=False)
    ax2.set_yticks([min(times), 0.5, 1, 1.5, max(times)], minor=False)
    #plt.yticks([min(times), 1, 2, max(times)])
    #plt.yticks([min(times), 3, 6, 10 , 14])
    ax2.set_ylabel("Time (s)")
    ax2.set_xlabel("Automaton Size")
    #plt.grid(axis='y')
    ax2.grid()
    # matplotlib.use("pgf")
    # matplotlib.rcParams.update({
    #     "pgf.texsystem": "pdflatex",
    #     'font.family': 'serif',
    #     'text.usetex': True,
    #     'pgf.rcfonts': False,
    # })
    #
    # plt.savefig('state_increase_runtime.pgf')
    fig.tight_layout()
    matplotlib.use("pgf")
    matplotlib.rcParams.update({
        "pgf.texsystem": "pdflatex",
        'font.family': 'serif',
        'text.usetex': True,
        'pgf.rcfonts': False,
    })
    fig.savefig('both_images.pgf',bbox_inches='tight')
def plot_together():
    """Render alphabet-size (right) and automaton-size (left) runtime panels
    side by side and save them as 'both_images.pgf'.

    NOTE(review): this is a verbatim RE-DEFINITION that shadows an identical
    `plot_together` defined earlier in this file -- one copy should be removed.
    `Axes.plot_side_by_side` is not a Matplotlib method; every such call below
    raises AttributeError (original plot calls appear to have been lost).
    """
    fig, (ax2, ax1) = plt.subplots(1, 2, figsize=(10, 3))
    with open('increasing_alphabet_experiments.csv', 'r') as f:
        reader = csv.reader(f)
        data = list(reader)
    # Header row: drop a trailing column and the row label, keep x values.
    x_axis = data.pop(0)[:-1]
    x_axis.pop(0)
    # use total times only
    x_axis = [int(x) for x in x_axis]
    # x_axis = list(range(2,51))
    times = []
    labels = ['DFA', 'Mealy', 'Moore']
    for r in data:
        row_name = r.pop(0)
        times.extend([float(i) for i in r])
    # TODO(review): broken -- see note in docstring.
    ax1.plot_side_by_side()
    ax1.legend()
    ax1.set_xticks([5, 25, 50, 75, 100], minor=False)
    ax1.set_yticks([min(times), 1, 2.5, 4, max(times)], minor=False)
    # plt.yticks([min(times), 1, 2, max(times)])
    # plt.yticks([min(times), 3, 6, 10 , 14])
    ax1.set_ylabel("Time (s)")
    ax1.set_xlabel("Alphabet Size")
    # plt.grid(axis='y')
    ax1.grid()
    with open('increasing_size_experiments.csv', 'r') as f:
        reader = csv.reader(f)
        data = list(reader)
    x_axis = data.pop(0)
    x_axis.pop(0)
    # use total times only
    data = data[3:]
    x_axis = [int(x) for x in x_axis]
    # x_axis = list(range(2,51))
    times = []
    labels = ['DFA', 'Mealy', 'Moore']
    for r in data:
        row_name = r.pop(0)
        times.extend([float(i) for i in r])
    # TODO(review): broken -- see note in docstring.
    ax2.plot_side_by_side()
    ax2.legend()
    ax2.set_xticks([100, 1000, 2000, 3000, 4000, 5000, ], minor=False)
    ax2.set_yticks([min(times), 0.5, 1, 1.5, max(times)], minor=False)
    # plt.yticks([min(times), 1, 2, max(times)])
    # plt.yticks([min(times), 3, 6, 10 , 14])
    ax2.set_ylabel("Time (s)")
    ax2.set_xlabel("Automaton Size")
    # plt.grid(axis='y')
    ax2.grid()
    # matplotlib.use("pgf")
    # matplotlib.rcParams.update({
    #     "pgf.texsystem": "pdflatex",
    #     'font.family': 'serif',
    #     'text.usetex': True,
    #     'pgf.rcfonts': False,
    # })
    #
    # plt.savefig('state_increase_runtime.pgf')
    fig.tight_layout()
    matplotlib.use("pgf")
    matplotlib.rcParams.update({
        "pgf.texsystem": "pdflatex",
        'font.family': 'serif',
        'text.usetex': True,
        'pgf.rcfonts': False,
    })
    fig.savefig('both_images.pgf', bbox_inches='tight')
# queries_mealy_sizes, 100,500,1000,2000
# learnlib, 3356.14, 22316.74, 49037.52, 106613.32
# aalpy, 2255.1, 6025.8, 8037.85, 12372.5
def plot_together_learnlib_comp():
    """Render AALpy-vs-LearnLib runtime panels (automaton size left, alphabet
    size right) side by side and save them as 'learnlib_comp.pgf'.

    NOTE(review): `Axes.plot_side_by_side` is not a Matplotlib method -- every
    such call below raises AttributeError; the original per-series plot calls
    appear to have been lost and must be restored before use.
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 3))
    with open('learnlib_com.csv', 'r') as f:
        reader = csv.reader(f)
        data = list(reader)
    # Header row: drop a trailing column and the row label, keep x values.
    x_axis = data.pop(0)[:-1]
    x_axis.pop(0)
    x_axis = [int(x) for x in x_axis]
    # x_axis = list(range(2,51))
    times = []
    labels = ['DFA(AALpy)','DFA(LearnLib)', 'Mealy(AALpy)', 'Mealy(Learnlib)']
    for r in data:
        row_name = r.pop(0)
        times.extend([float(i) for i in r])
    # TODO(review): broken -- see note in docstring.
    ax1.plot_side_by_side()
    ax1.legend()
    ax1.set_xticks([100, 1000, 2000, 3000, 4000, 5000], minor=False)
    ax1.set_yticks([min(times), 0.5, 1, 1.5, max(times)], minor=False)
    # plt.yticks([min(times), 1, 2, max(times)])
    # plt.yticks([min(times), 3, 6, 10 , 14])
    ax1.set_ylabel("Time (s)")
    ax1.set_xlabel("Automaton Size")
    # plt.grid(axis='y')
    ax1.grid()
    with open('learnlib_alph_comp.csv', 'r') as f:
        reader = csv.reader(f)
        data = list(reader)
    x_axis = data.pop(0)
    x_axis.pop(0)
    # use total times only
    x_axis = [int(x) for x in x_axis]
    # x_axis = list(range(2,51))
    times = []
    labels = ['DFA(AALpy)', 'DFA(LearnLib)', 'Mealy(AALpy)', 'Mealy(Learnlib)']
    for r in data:
        row_name = r.pop(0)
        times.extend([float(i) for i in r])
    # TODO(review): broken -- see note in docstring.
    ax2.plot_side_by_side()
    ax2.legend()
    ax2.set_xticks([5, 25, 50, 75, 100], minor=False)
    ax2.set_yticks([min(times), 1, 2.5, 4, max(times)], minor=False)
    # plt.yticks([min(times), 1, 2, max(times)])
    # plt.yticks([min(times), 3, 6, 10 , 14])
    ax2.set_ylabel("Time (s)")
    ax2.set_xlabel("Alphabet Size")
    # plt.grid(axis='y')
    ax2.grid()
    # matplotlib.use("pgf")
    # matplotlib.rcParams.update({
    #     "pgf.texsystem": "pdflatex",
    #     'font.family': 'serif',
    #     'text.usetex': True,
    #     'pgf.rcfonts': False,
    # })
    #
    # plt.savefig('state_increase_runtime.pgf')
    fig.tight_layout()
    matplotlib.use("pgf")
    matplotlib.rcParams.update({
        "pgf.texsystem": "pdflatex",
        'font.family': 'serif',
        'text.usetex': True,
        'pgf.rcfonts': False,
    })
    fig.savefig('learnlib_comp.pgf', bbox_inches='tight')
def plot_runtime_steps():
    """Plot AALpy-vs-LearnLib learning steps (left axis) and total learning
    time (right twin axis) against automaton size; export via tikzplotlib.

    NOTE(review): `Axes.plot_side_by_side` is not a Matplotlib method -- every
    such call below raises AttributeError; the original plot calls for the
    four hard-coded data series appear to have been lost.
    """
    # Hard-coded benchmark results (averaged runs) for sizes 2..98 step 2.
    automaton_sizes = list(range(2,100,2))
    learnlib = [84.0, 84.0, 308.0, 452.0, 516.0, 564.0, 836.0, 996.0, 1188.0, 1268.0, 1870.5, 1604.0, 2358.46, 2004.0, 3189.12, 2699.32, 3756.52, 3617.64, 3925.64, 5144.98, 4278.78, 5198.06, 5452.32, 5272.12, 4841.46, 6046.92, 5864.2, 6043.34, 7501.06, 8291.58, 6976.76, 7722.48, 8498.26, 8021.4, 8409.82, 8427.62, 10348.0, 9017.5, 11266.08, 10496.48, 10948.76, 11697.94, 12539.88, 12573.5, 12266.44, 12788.36, 13619.44, 13204.7, 14218.14]
    aalpy = [80, 80, 304, 448, 512, 560, 832, 992, 1184, 1264, 1904.4666666666667, 1600, 2355, 2000, 3304.4, 2969, 3875.8, 3491.8, 3994.6666666666665, 5259.8, 4122.6, 5408.2, 5425.6, 5457.733333333334, 4700.6, 5575.266666666666, 5877.333333333333, 5643.333333333333, 7848, 8746.4, 6902, 6764.133333333333, 8968.266666666666, 8994.666666666666, 8548.066666666668, 8034.2, 10303.8, 9518.2, 11861.6, 10639.466666666667, 11049.466666666667, 12028.666666666666, 12798.4, 13055.733333333334, 12525.4, 13473.6, 13445.866666666667, 13654.866666666667, 15162.066666666668]
    learnlib_dfa_steps = [12, 176, 366, 817.517349244473, 729.3582188142311, 862.8326032572571, 1629.7225991534913, 2278.6612662357325, 1851.9775842649628, 2256.358673947518, 2877.186267318514, 3621.257762117678, 4309.52400476166, 3957.7026357704567, 5892.417280455997, 4340.934611960256, 5361.8053640093285, 5826.332838306764, 6170.173413693644, 6362.896456856769, 7467.5489502843675, 10647.813901891837, 7771.6990409704595, 10021.521929114147, 11682.322172805243, 13337.927737932912, 12739.746892560444, 14478.86166799318, 12871.644461597713, 13099.900598108483, 11801.712844210842, 14545.968533421541, 16248.345494832956, 16266.864422969868, 15963.905452213141, 19195.796973001674, 17252.921273408057, 18935.661631940457, 17732.29013734688, 19960.551029106155, 17375.00727356809, 18493.587556335762, 22476.68693665887, 20993.560784351062, 24736.797016941302, 23454.586313063137, 21647.451883062633, 29979.506261659953, 25118.61300496266]
    aalpy_dfa_steps = [12, 176.6, 366.93333333333334, 790.5333333333333, 840.5333333333333, 886.1333333333333, 1657.8,
                       2225.133333333333, 2076.3333333333335, 2649.3333333333335, 3065, 3218.0666666666666, 4783.466666666666,
                       3860.3333333333335, 5724.4, 4960.733333333334, 5226.733333333334, 6479.4, 5991.866666666667, 7096.2,
                       7241.866666666667, 9292.066666666668, 8487.6, 10789.4, 11765.2, 12693.666666666666, 11715.266666666666,
                       12860.6, 11887.466666666667, 12740.333333333334, 12882.133333333333, 15290.666666666666, 15572,
                       14563.066666666668, 14886.8, 17937.466666666667, 16332.533333333333, 20078.6, 18581.8,
                       18823.266666666666, 20331.733333333334, 20824.733333333334, 19940.8, 23263.666666666668,
                       21659.466666666667, 25069.933333333334, 24592.333333333332, 27892.2, 24161.066666666666]
    fig, ax1 = plt.subplots()
    ax1.set_xlabel('Automaton Size')
    ax1.set_ylabel('Number of Learning Steps')
    # TODO(review): broken -- see note in docstring.
    ax1.plot_side_by_side()
    ax1.plot_side_by_side()
    ax1.plot_side_by_side()
    ax1.plot_side_by_side()
    # ax1.tick_params(axis='y', labelcolor=color)
    ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
    #
    ax2.set_ylabel('Total Learning Time (ms)')  # we already handled the x-label with ax1
    # TODO(review): broken -- see note in docstring.
    ax2.plot_side_by_side()
    ax2.plot_side_by_side()
    ax2.plot_side_by_side()
    ax2.plot_side_by_side()
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    plt.grid()
    plt.legend()
    # matplotlib.use("pgf")
    # matplotlib.rcParams.update({
    #     "pgf.texsystem": "pdflatex",
    #     'font.family': 'serif',
    #     'text.usetex': True,
    #     'pgf.rcfonts': False,
    #     #'font.size': 42
    # })
    import tikzplotlib
    tikzplotlib.save("test.txt")
    #fig.savefig('runtime_comp.pgf', bbox_inches='tight')
# NOTE(review): runs at import time; the alternative plots below are kept
# disabled on purpose -- enable exactly one when regenerating figures.
plot_runtime_steps()
#plot_runtime_steps()
# plot_together_learnlib_comp()
#plot_increasing_size_exp()
#plot_increasing_alphabeth_exp()
================================================
FILE: Benchmarking/json_lbt.py
================================================
import json
import random
from collections import defaultdict
from aalpy import run_PAPNI, load_automaton_from_file
from aalpy.automata import VpaAlphabet
from aalpy.utils import generate_input_output_data_from_vpa
def is_valid_json(s):
    """Return True iff *s* parses as JSON with the standard-library parser."""
    try:
        json.loads(s)
    except json.JSONDecodeError:
        return False
    return True
def to_json_string(json_tuple):
    """Render a sequence of abstract JSON symbols as concrete JSON text.

    The placeholder symbols 'key' and 'val' are emitted as quoted strings;
    all structural symbols ('{', '}', '[', ']', ':', ',') pass through verbatim.
    """
    parts = []
    for symbol in json_tuple:
        parts.append(f'"{symbol}"' if symbol in ('key', 'val') else symbol)
    return ''.join(parts)
def generate_random_json(max_depth=3, max_elements=5):
    """
    Generate a random valid JSON structure using abstract symbols.

    Args:
        max_depth (int): Maximum nesting depth
        max_elements (int): Maximum number of elements in arrays or objects

    Returns:
        tuple: (json_str, json_tuple) -- the rendered JSON string and the
        underlying tuple of abstract symbols. The root is always an object.
    """
    def _value(depth):
        # Beyond the depth budget only simple values are produced.
        if depth >= max_depth:
            return ['val']
        kind = random.choice(['simple', 'array', 'object'])
        if kind == 'simple':
            return ['val']
        if kind == 'array':
            return _array(depth)
        return _object(depth)

    def _array(depth):
        # Array with a random number of comma-separated elements.
        count = random.randint(1, max_elements)
        symbols = ['[']
        for idx in range(count):
            symbols.extend(_value(depth + 1))
            if idx != count - 1:
                symbols.append(',')
        symbols.append(']')
        return symbols

    def _object(depth):
        # Object with a random number of "key": <value> pairs.
        count = random.randint(1, max_elements)
        symbols = ['{']
        for idx in range(count):
            symbols.append('key')
            symbols.append(':')
            symbols.extend(_value(depth + 1))
            if idx != count - 1:
                symbols.append(',')
        symbols.append('}')
        return symbols

    # Start generation with an object at the root.
    json_tuple = tuple(_object(0))
    return to_json_string(json_tuple), json_tuple
def corrupt_json(symbols):
    """
    Take a valid JSON symbol list and make it invalid using various strategies.

    Returns the corrupted symbol list only. Each branch also builds a
    human-readable `reason` string, but it is never returned to the caller.
    NOTE(review): some strategies (e.g. 'bracket_mismatch' on a bracket-free
    input, or a lucky 'add_symbol'/'drop_symbol') can still yield valid JSON,
    so callers should re-validate the result.
    """
    corrupted = symbols.copy()
    strategies = [
        'bracket_mismatch',  # Replace brackets with mismatched ones
        'add_symbol',  # Add inappropriate symbol
        'drop_symbol',  # Remove necessary symbol
        'naked_value',  # Just a lone value
        'naked_key',  # Just a lone key
        'multiple_commas',  # Add multiple consecutive commas
        'trailing_comma',  # Add comma at the end
        'missing_value',  # Remove value after key
        'missing_key',  # Remove key before value
        'missing_colon',  # Remove colon between key-value
        'standalone_colon',  # Just a colon
        'comma_start_end',  # Start or end with comma
        'empty_structure',  # Empty structure with internal comma
        'multiple_colons'  # Multiple colons in key-value pair
    ]
    strategy = random.choice(strategies)
    if strategy == 'bracket_mismatch':
        bracket_positions = [i for i, s in enumerate(corrupted)
                             if s in ['{', '}', '[', ']']]
        if bracket_positions:
            pos = random.choice(bracket_positions)
            original = corrupted[pos]
            # Map each bracket to a randomly chosen mismatching replacement.
            corruptions = {
                '{': random.choice(['}', ']']),
                '}': random.choice(['{', '[']),
                '[': random.choice(['}', ']']),
                ']': random.choice(['{', '['])
            }
            corrupted[pos] = corruptions[original]
            reason = f"Replaced '{original}' with '{corrupted[pos]}'"
    elif strategy == 'naked_value':
        corrupted = ['val']
        reason = "Created lone value without structure"
    elif strategy == 'naked_key':
        corrupted = ['key']
        reason = "Created lone key without value or structure"
    elif strategy == 'multiple_commas':
        num_commas = random.randint(2, 4)
        corrupted = ['{'] + [',' for _ in range(num_commas)] + ['}']
        reason = f"Created structure with {num_commas} consecutive commas"
    elif strategy == 'trailing_comma':
        if len(corrupted) > 2:  # Need at least {} or []
            if corrupted[-1] in ['}', ']']:
                corrupted.insert(-1, ',')
                reason = "Added trailing comma before closing bracket"
            else:
                corrupted.append(',')
                reason = "Added trailing comma at end"
        else:
            corrupted = ['{', ',', '}']
            reason = "Added trailing comma in empty structure"
    elif strategy == 'missing_value':
        key_positions = [i for i, s in enumerate(corrupted) if s == 'key']
        if key_positions:
            pos = random.choice(key_positions)
            # Only drop a value that actually follows a 'key:' pair.
            if pos + 2 < len(corrupted) and corrupted[pos + 1] == ':':
                corrupted.pop(pos + 2)  # Remove value
                reason = "Removed value after key"
            else:
                corrupted = ['{', 'key', ':', '}']
                reason = "Created key without value"
        else:
            corrupted = ['{', 'key', ':', '}']
            reason = "Created key without value"
    elif strategy == 'missing_key':
        corrupted = ['{', ':', 'val', '}']
        reason = "Created value with missing key"
    elif strategy == 'missing_colon':
        colon_positions = [i for i, s in enumerate(corrupted) if s == ':']
        if colon_positions:
            pos = random.choice(colon_positions)
            corrupted.pop(pos)
            reason = "Removed colon between key and value"
        else:
            corrupted = ['{', 'key', 'val', '}']
            reason = "Created key-value pair without colon"
    elif strategy == 'standalone_colon':
        corrupted = [':']
        reason = "Created standalone colon"
    elif strategy == 'comma_start_end':
        if random.choice([True, False]):
            corrupted = [','] + corrupted
            reason = "Added comma at start"
        else:
            corrupted.append(',')
            reason = "Added comma at end"
    elif strategy == 'empty_structure':
        structure_type = random.choice(['{', '['])
        closing = '}' if structure_type == '{' else ']'
        corrupted = [structure_type, ',', closing]
        reason = f"Created empty {structure_type}{closing} with internal comma"
    elif strategy == 'multiple_colons':
        colon_count = random.randint(2, 3)
        corrupted = ['{', 'key'] + [':' for _ in range(colon_count)] + ['val', '}']
        reason = f"Added {colon_count} colons between key and value"
    elif strategy == 'add_symbol':
        possible_additions = ['key', 'val', ',', ':', '{', '}', '[', ']']
        symbol_to_add = random.choice(possible_additions)
        pos = random.randint(0, len(corrupted))
        corrupted.insert(pos, symbol_to_add)
        reason = f"Added '{symbol_to_add}' at position {pos}"
    else:  # drop_symbol
        if len(corrupted) > 1:
            pos = random.randint(0, len(corrupted) - 1)
            dropped = corrupted.pop(pos)
            reason = f"Dropped '{dropped}' from position {pos}"
        else:
            corrupted = []
            reason = "Dropped all symbols"
    return corrupted
# Define call, return, and internal symbols for JSON. Per the VpaAlphabet
# naming, call symbols open a nesting level, return symbols close one, and
# internal symbols leave the nesting depth unchanged.
call_set = ['{', '[']
return_set = ['}', ']']
internal_set = [':', ',', 'key', 'val']
# Define the input alphabet
vpa_alphabet = VpaAlphabet(
    internal_alphabet=internal_set,
    call_alphabet=call_set,
    return_alphabet=return_set
)
def generate_dataset(num_sequences):
    """Build a labeled set of JSON symbol sequences for passive learning.

    Each randomly generated valid JSON tuple is added with label True;
    up to five corrupted variants of it that fail to parse are added with
    label False. Generation continues until more than *num_sequences*
    unique samples are collected.
    """
    samples = set()
    while len(samples) <= num_sequences:
        json_str, json_tuple = generate_random_json(max_depth=2, max_elements=3)
        assert is_valid_json(json_str), json_str
        samples.add((tuple(json_tuple), True))
        # Derive several invalid variants from the same valid structure.
        for _ in range(5):
            broken = corrupt_json(list(json_tuple))
            if not is_valid_json(''.join(broken)):
                samples.add((tuple(broken), False))
    return samples
def validate_string_with_json_parser(json_str, json_parser):
    """Return True iff *json_str* is accepted by the named JSON parser.

    Args:
        json_str: candidate JSON text.
        json_parser: one of 'json', 'ujson', 'orjson', 'json5',
            'simplejson', 'demjson', 'pyjson5'.

    Returns:
        True if parsing succeeds, False if the parser rejects the input.
    """
    # Fix: 'json5' has a dispatch branch below but was missing from this
    # guard, which made that branch unreachable.
    assert json_parser in {'json', 'ujson', 'orjson', 'json5', 'simplejson', 'demjson', 'pyjson5'}
    import ujson
    import orjson
    import json5
    import simplejson as sj
    import demjson
    import pyjson5
    try:
        if json_parser == "json":
            json.loads(json_str)
        elif json_parser == "ujson":
            ujson.loads(json_str)
        elif json_parser == "orjson":
            orjson.loads(json_str)
        elif json_parser == "json5":
            json5.loads(json_str)
        elif json_parser == "simplejson":
            sj.loads(json_str)
        elif json_parser == "demjson":
            demjson.decode(json_str)
        elif json_parser == "pyjson5":
            pyjson5.loads(json_str)
        else:
            raise ValueError("Unsupported JSON parser")
        return True
    except (json.JSONDecodeError, ujson.JSONDecodeError, orjson.JSONDecodeError,
            demjson.JSONDecodeError, pyjson5.Json5Exception, ValueError):
        # A parse failure of any supported flavor means rejection.
        return False
# Learning-based testing loop: learn a VPA model of "valid JSON" with PAPNI,
# then cross-check it against several third-party JSON parsers and feed
# unanimous disagreements back into the learning set.
use_learned_model = False
model_learning_dataset = []
if not use_learned_model:
    # Learn a fresh model from generated data and cache it on disk.
    model_learning_dataset = generate_dataset(num_sequences=20000)
    learned_json_model = run_PAPNI(model_learning_dataset, vpa_alphabet)
    # learned_json_model.visualize()
    learned_json_model.save('learned_json.dot')
else:
    learned_json_model = load_automaton_from_file('learned_json.dot', 'vpa')
parsers_under_test = ['json', 'ujson', 'orjson', 'simplejson', 'demjson', 'pyjson5']
num_learning_iterations = 3
disagreements = defaultdict(list)
results = defaultdict(list)
for _ in range(num_learning_iterations):
    disagreements.clear()
    # Generate labeled test sequences from the current learned model.
    test_dataset = generate_input_output_data_from_vpa(learned_json_model, num_sequences=10000, max_seq_len=16)
    print(f"Num well-matched tests: {len([x for x in test_dataset if learned_json_model.is_balanced(x[0])])}")
    num_new_sequences = 0
    for seq, label in test_dataset:
        json_string = to_json_string(seq)
        for p in parsers_under_test:
            parser_output = validate_string_with_json_parser(json_string, p)
            if parser_output != label:
                disagreements[p].append(json_string)
            # Record at most one verdict per parser for each test string.
            if json_string not in results.keys() or json_string in results.keys() and len(results[json_string]) != len(parsers_under_test):
                results[json_string].append(parser_output)
        # If every parser disagrees with the model, trust the parsers and
        # flip the label in the learning set.
        add_to_test_set = all(json_string in x for x in disagreements.values())
        if add_to_test_set:
            if (seq, label) in model_learning_dataset:
                model_learning_dataset.remove((seq, label))
            model_learning_dataset.add((seq, not label))
            num_new_sequences += 1
    print(f'Added {num_new_sequences} to learning set, total size {len(model_learning_dataset)}')
    # Re-learn with the corrected dataset.
    learned_json_model = run_PAPNI(model_learning_dataset, vpa_alphabet, print_info=False)
    print(f'Current model size: {learned_json_model.size}')
# Summarize strings on which the parsers themselves disagree.
comparison_results = {}
for key, values in results.items():
    true_indexes = [parsers_under_test[i] for i, v in enumerate(values) if v]
    false_indexes = [parsers_under_test[i] for i, v in enumerate(values) if not v]
    if true_indexes and false_indexes:  # Ensure there are both True and False values
        comparison_results[key] = [true_indexes, false_indexes]
for test_str, res in comparison_results.items():
    print('---------------------')
    print(test_str)
    # Fix: label typo 'Postive ' -> 'Positive '.
    print('Positive ', res[0])
    print('Negative', res[1])
#
# for key, val in disagreements.items():
# print('----------------------------------------------------------')
# print(key)
# print(f'Total number of discrepancies: {len(val)}')
#
# # check which disagreements are not present in other parsers
# values = set(val)
# other_values = set()
# for k, v in disagreements.items():
# if k != key:
# other_values.update(v)
#
# unique = list(values - other_values)
#
# print(f'Unique discrepancies: {len(unique)}')
# if unique:
# print('Printing unique discrepancies')
# for i in unique:
# print("".join(i))
================================================
FILE: Benchmarking/papni_sequences.pickle
================================================
[File too large to display: 24.3 MB]
================================================
FILE: Benchmarking/papni_vs_rpni_benchmarking.py
================================================
import pickle
from collections import defaultdict
from random import shuffle
from aalpy import run_RPNI, run_PAPNI, AutomatonSUL
from aalpy.utils import convert_i_o_traces_for_RPNI, generate_input_output_data_from_vpa, is_balanced
from aalpy.utils.BenchmarkVpaModels import get_all_VPAs
from statistics import mean, stdev
# Load positive and negative sequences for all models.
# Pickled mapping: model index -> list of (sequence, bool label) pairs --
# presumably generated offline; verify against the generator script.
all_data = dict()
with open('papni_sequences.pickle', 'rb') as handle:
    all_data = pickle.load(handle)
def calculate_f1_score(precision, recall):
    """Return the F1 score (harmonic mean of precision and recall); 0 when both are 0."""
    denominator = precision + recall
    if denominator == 0:
        return 0
    return 2 * (precision * recall) / denominator
def calculate_precision_recall_f1(true_positives, false_positives, false_negatives):
    """Compute (precision, recall, f1) from raw confusion-matrix counts.

    Precision/recall default to 0 when their denominator is 0.
    """
    predicted_positive = true_positives + false_positives
    actual_positive = true_positives + false_negatives
    precision = true_positives / predicted_positive if predicted_positive > 0 else 0
    recall = true_positives / actual_positive if actual_positive > 0 else 0
    return precision, recall, calculate_f1_score(precision, recall)
def compare_rpni_and_papni(test_data, rpni_model, papni_model):
    """Evaluate the RPNI and PAPNI models on the same labeled test data.

    Args:
        test_data: iterable of (input_sequence, bool_label) pairs.
        rpni_model, papni_model: learned automata exposing
            reset_to_initial/execute_sequence/initial_state/size.

    Returns:
        [rpni_size, papni_size, (p, r, f1) of RPNI, (p, r, f1) of PAPNI].
    """
    def evaluate_model(learned_model, test_data):
        # Count confusion-matrix entries; the model's final output on each
        # sequence is its acceptance verdict.
        true_positives = 0
        false_positives = 0
        false_negatives = 0
        for input_seq, correct_output in test_data:
            learned_model.reset_to_initial()
            learned_output = learned_model.execute_sequence(learned_model.initial_state, input_seq)[-1]
            if learned_output and correct_output:
                true_positives += 1
            elif learned_output and not correct_output:
                false_positives += 1
            elif not learned_output and correct_output:
                false_negatives += 1
        # Consistency fix: reuse the shared helper defined above instead of
        # duplicating the precision/recall/F1 formulas inline.
        return calculate_precision_recall_f1(true_positives, false_positives, false_negatives)

    rpni_error = evaluate_model(rpni_model, test_data)
    papni_error = evaluate_model(papni_model, test_data)
    # print(f'RPNI size {rpni_model.size} vs {papni_model.size} PAPNI size')
    return [rpni_model.size, papni_model.size, rpni_error, papni_error]
# run KV for VPDA learning and record all sequences
# these sequences should cover the whole characterizing set required to learn 1-SEVPA
def get_sequences_from_active_sevpa(model, verbose=False):
    """Actively learn a 1-SEVPA of *model* with KV and record every queried
    input/output trace.

    Returns:
        (sequences, learned_model) where sequences are the recorded traces
        converted with convert_i_o_traces_for_RPNI, and learned_model is the
        KV result.
    """
    from aalpy import SUL, run_KV, RandomWordEqOracle, SevpaAlphabet

    class CustomSUL(SUL):
        # Wraps another SUL and records each completed query as a list of
        # (input, output) pairs in self.sequences.
        def __init__(self, automatonSUL):
            super(CustomSUL, self).__init__()
            self.sul = automatonSUL
            self.sequences = []

        def pre(self):
            # Start a fresh trace for the upcoming query.
            self.tc = []
            self.sul.pre()

        def post(self):
            # Query finished: archive the recorded trace.
            self.sequences.append(self.tc)
            self.sul.post()

        def step(self, letter):
            output = self.sul.step(letter)
            if letter is not None:
                self.tc.append((letter, output))
            return output

    vpa_alphabet = model.get_input_alphabet()
    alphabet = SevpaAlphabet(vpa_alphabet.internal_alphabet, vpa_alphabet.call_alphabet, vpa_alphabet.return_alphabet)
    sul = AutomatonSUL(model)
    sul = CustomSUL(sul)
    eq_oracle = RandomWordEqOracle(alphabet.get_merged_alphabet(), sul, num_walks=50000, min_walk_len=6,
                                   max_walk_len=30, reset_after_cex=True)
    # eq_oracle = BreadthFirstExplorationEqOracle(vpa_alphabet.get_merged_alphabet(), sul, 7)
    lm = run_KV(alphabet, sul, eq_oracle, automaton_type='vpa', print_level=3 if verbose else 0)
    return convert_i_o_traces_for_RPNI(sul.sequences), lm
def split_data_to_learning_and_testing(data, learning_to_test_ratio=0.5):
    """Shuffle *data* and split it into learning and testing sequences.

    The learning portion keeps (roughly) *learning_to_test_ratio* of the
    positive samples and of the negative samples separately, so the class
    balance is preserved; everything beyond those budgets goes to testing.

    NOTE: *data* is shuffled in place.
    """
    positives = len([sample for sample in data if sample[1]])
    negatives = len(data) - positives
    positive_budget = positives * learning_to_test_ratio
    negative_budget = negatives * learning_to_test_ratio
    # sorted(data, key=lambda x: len(x[0]))
    shuffle(data)
    learning_sequences, test_sequences = [], []
    taken_pos, taken_neg = 0, 0
    for seq, label in data:
        sample = (seq, label)
        if label and taken_pos <= positive_budget:
            learning_sequences.append(sample)
            taken_pos += 1
        elif not label and taken_neg <= negative_budget:
            learning_sequences.append(sample)
            taken_neg += 1
        else:
            test_sequences.append(sample)
    return learning_sequences, test_sequences
def run_experiment(experiment_id,
                   ground_truth_model,
                   num_of_learning_seq,
                   max_learning_seq_len,
                   random_data_generation=True,
                   learning_to_test_ratio=0.5):
    """Learn RPNI and PAPNI models for one ground-truth VPA and compare them.

    Data comes either from fresh random generation or from the pre-pickled
    `all_data[experiment_id]` pool (capped at 5000 positive / 10000 total).

    Returns:
        [rpni_size, papni_size, rpni_(p,r,f1), papni_(p,r,f1),
         (learn_pos, learn_neg), (test_pos, test_neg)].
    """
    if random_data_generation:
        data = generate_input_output_data_from_vpa(ground_truth_model,
                                                   num_sequences=num_of_learning_seq,
                                                   max_seq_len=max_learning_seq_len,
                                                   )
    else:
        all_generated_data = all_data[experiment_id]
        # sorted(all_generated_data, key=lambda x: len(x))
        shuffle(all_generated_data)
        positive_seq = [x for x in all_generated_data if x[1]]
        negative_seq = [x for x in all_generated_data if not x[1]]
        # Cap the pool: at most 5000 positives, filled up to 10000 total.
        data = []
        data += positive_seq[:5000]
        data += negative_seq[:10000 - len(data)]
    vpa_alphabet = ground_truth_model.get_input_alphabet()
    learning_data, test_data = split_data_to_learning_and_testing(data, learning_to_test_ratio=learning_to_test_ratio)
    # Record class balance of both splits for reporting.
    num_positive_learning = len([x for x in learning_data if x[1]])
    learning_set_size = (num_positive_learning, len(learning_data) - num_positive_learning)
    num_positive_test = len([x for x in test_data if x[1]])
    num_test_size = (num_positive_test, len(test_data) - num_positive_test)
    rpni_model = run_RPNI(learning_data, 'dfa', print_info=False, input_completeness='sink_state')
    papni_model = run_PAPNI(learning_data, vpa_alphabet, print_info=False)
    comparison_results = compare_rpni_and_papni(test_data, rpni_model, papni_model)
    comparison_results = comparison_results + [learning_set_size, num_test_size]
    return comparison_results
def run_experiments_multiple_times(test_models, num_times, learning_to_test_ratio=0.5):
    """Repeat run_experiment *num_times* per model, print F1 statistics, and
    pickle all raw results to 'papni_rpni_eval_results.pickle'.
    """
    all_results = defaultdict(list)
    print(f'Running experiment on each model {num_times} times.')
    for idx, gt in enumerate(test_models):
        print('Experiment: ', idx)
        for _ in range(num_times):
            r = run_experiment(idx, gt, num_of_learning_seq=10000, max_learning_seq_len=50,
                               random_data_generation=False, learning_to_test_ratio=learning_to_test_ratio)
            all_results[idx].append(r)
        # r[2]/r[3] are the RPNI/PAPNI (precision, recall, f1) tuples;
        # index 2 of each tuple is the F1 score.
        rpni_results = [r[2] for r in all_results[idx]]
        papni_results = [r[3] for r in all_results[idx]]
        rpni_f1 = [r[2] for r in rpni_results]
        papni_f1 = [r[2] for r in papni_results]
        print(f'RPNI (F1) : min {min(rpni_f1)}, max {max(rpni_f1)}, mean {mean(rpni_f1)}, stddev {stdev(rpni_f1)}')
        print(f'PAPNI (F1) : min {min(papni_f1)}, max {max(papni_f1)}, mean {mean(papni_f1)}, stddev {stdev(papni_f1)}')
        print('----------------------------------------------------------------')
    import pickle
    with open('papni_rpni_eval_results.pickle', 'wb') as handle:
        pickle.dump(all_results, handle, protocol=pickle.HIGHEST_PROTOCOL)
def test_papni_based_on_sevpa_dataset():
    """Sanity experiment: learn a SEVPA actively, train PAPNI on the exact
    traces the active learner queried, then compare both models (and the
    ground-truth labels) on the pre-pickled balanced sequences.
    """
    all_models = get_all_VPAs()
    for idx, gt in enumerate(all_models):
        sevpa_papni_mismatch, papni_error, sevpa_error = 0, 0, 0
        input_al = gt.get_input_alphabet()
        sevpa_dataset, sevpa_model = get_sequences_from_active_sevpa(gt)
        sevpa_dataset_set = set(sevpa_dataset)
        papni_model = run_PAPNI(sevpa_dataset, input_al)
        balanced_counter = 0
        not_in_learning = 0
        in_learning = 0
        for seq, label in all_data[idx]:
            # Only well-matched (balanced) sequences are meaningful for VPAs.
            if is_balanced(seq, input_al):
                balanced_counter += 1
                if (seq, label) not in sevpa_dataset_set:
                    not_in_learning += 1
                else:
                    in_learning += 1
                sevpa_model.reset_to_initial()
                sevpa_output = sevpa_model.execute_sequence(sevpa_model.initial_state, seq)
                papni_model.reset_to_initial()
                papni_output = papni_model.execute_sequence(papni_model.initial_state, seq)
                # Compare models against each other and against the label.
                if sevpa_output != papni_output:
                    sevpa_papni_mismatch += 1
                if papni_output[-1] != label:
                    papni_error += 1
                if sevpa_output[-1] != label:
                    sevpa_error += 1
        print('--------------------------------------')
        print(f'Model Index {idx}; # well-matched {balanced_counter}, # unique tests {not_in_learning}')
        print(f'Papni Error {papni_error}')
        print(f'Sevpa Error {sevpa_error}')
        print(f'Mismatch {sevpa_papni_mismatch}')
        # Every balanced sequence must be either inside or outside the
        # learning set -- counts have to add up.
        assert in_learning + not_in_learning == balanced_counter
#test_papni_based_on_sevpa_dataset()
# Runs at import time: benchmark every bundled VPA 20 times with a 50/50
# learning/testing split.
all_models = get_all_VPAs()
run_experiments_multiple_times(all_models, num_times=20, learning_to_test_ratio=0.5)
================================================
FILE: Benchmarking/passive_mdp_vs_smm.py
================================================
import random
import aalpy.paths
from aalpy.SULs import AutomatonSUL
from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion
from aalpy.learning_algs import run_Alergia
from aalpy.utils import load_automaton_from_file, get_correct_prop_values, get_properties_file
from aalpy.utils import model_check_experiment
# Benchmark MDP models shipped with the repository.
path_to_dir = '../DotModels/MDPs/'
files = ['first_grid.dot', 'second_grid.dot',
         'slot_machine.dot', 'mqtt.dot', 'tcp.dot']  # 'shared_coin.dot'
# PRISM model-checker binary and property files used by model_check_experiment
# (Windows-specific path -- adjust for other machines).
aalpy.paths.path_to_prism = "C:/Program Files/prism-4.7/bin/prism.bat"
aalpy.paths.path_to_properties = "prism_eval_props/"
def writeSamplesToFile(samples, path="alergiaSamples.txt"):
    """Append Alergia samples to *path*, one comma-separated line per sample.

    SMM samples are lists of (input, output) tuples; MDP samples additionally
    carry the initial output as their first element, so an MDP line starts
    with that output while an SMM line starts with a comma.

    Fixes over the original: the caller's sample lists are no longer mutated
    (the initial output was destructively pop()ed), and the redundant
    f.close() inside the `with` block is gone.
    """
    # SMM samples start directly with an (input, output) pair.
    isSMM = isinstance(samples[0][0], tuple)
    with open(path, 'a') as f:
        for sample in samples:
            if isSMM:
                prefix, pairs = "", sample
            else:
                # Read (not pop) the initial output so caller data stays intact.
                prefix, pairs = str(sample[0]), sample[1:]
            line = prefix + ''.join(f',{i},{o}' for i, o in pairs)
            f.write(line + '\n')
def deleteSampleFile(path="alergiaSamples.txt"):
    """Remove the sample file at *path*; do nothing if it does not exist."""
    import os
    try:
        os.remove(path)
    except FileNotFoundError:
        pass
# Number of random traces sampled from the ground-truth MDP for passive learning.
num_traces = 100000
for file in ['first_grid.dot']:
    exp_name = file.split('.')[0]
    # Load the ground-truth model and wrap it as a SUL for trace sampling.
    original_mdp = load_automaton_from_file(path_to_dir + file, automaton_type='mdp')
    input_alphabet = original_mdp.get_input_alphabet()
    mdp_sul = AutomatonSUL(original_mdp)
    for _ in range(1):
        data = []
        for _ in range(num_traces):
            # MDP-style trace: initial output followed by (input, output) pairs.
            sample = [mdp_sul.pre()]
            for _ in range(random.randint(10, 50)):
                i = random.choice(input_alphabet)
                o = mdp_sul.step(i)
                sample.append((i, o))
            data.append(sample)
            mdp_sul.post()
        # Learn an MDP from the sampled traces.
        learned_mdp = run_Alergia(data, automaton_type='mdp')
        # Dropping the leading initial output turns the same traces into
        # SMM-style data (lists of (input, output) pairs only).
        for s in data:
            s.pop(0)
        learned_smm = run_Alergia(data, automaton_type='smm')
        # Convert the SMM to an MDP so both models can be checked against the
        # same PRISM properties.
        smm_2_mdp = smm_to_mdp_conversion(learned_smm)
        mdp_results, mdp_err = model_check_experiment(get_properties_file(exp_name),
                                                      get_correct_prop_values(exp_name), learned_mdp)
        smm_results, smm_err = model_check_experiment(get_properties_file(exp_name),
                                                      get_correct_prop_values(exp_name), smm_2_mdp)
        print(learned_mdp.size, learned_smm.size, smm_2_mdp.size)
        print(f'-------{exp_name}---------')
        print(f'MDP Error: {mdp_err}')
        print(f'SMM Error: {smm_err}')
        # Per-property error difference SMM - MDP; properties missing from the
        # SMM results or with zero SMM error are skipped.
        smm_diff = {}
        for key, val in mdp_err.items():
            if key not in smm_err.keys() or smm_err[key] == 0:
                continue
            smm_diff[key] = round(smm_err[key] - val, 2)
        print(f'SMM differance: {smm_diff}')
================================================
FILE: Benchmarking/prism_eval_props/bluetooth.props
================================================
Pmax=? [ F<5 ("crash") ]
Pmax=? [ F<8 ("crash") ]
Pmax=? [ F<11 ("crash") ]
Pmax=? [ F<14 ("crash") ]
Pmax=? [ F<17 ("crash") ]
Pmax=? [ F<20 ("crash") ]
Pmax=? [ F<3 ("no_response") ]
Pmax=? [ F<5 ("no_response") ]
Pmax=? [ F<8 ("no_response") ]
Pmax=? [ F<11 ("no_response") ]
Pmax=? [ F<14 ("no_response") ]
Pmax=? [ F<17 ("no_response") ]
Pmax=? [ F<20 ("no_response") ]
================================================
FILE: Benchmarking/prism_eval_props/emqtt_two_client.props
================================================
Pmax=? [ F<5 ("c2_Pub_c2_my_topic_bye") ]
Pmax=? [ F<5 ("c1_crash") ]
Pmax=? [ F<11 ("c1_crash") ]
Pmax=? [ F<17 ("c1_crash") ]
Pmax=? [(!("c2_crash")) U<12 ("c2_Pub_c2_my_topic_messageQos1") ]
================================================
FILE: Benchmarking/prism_eval_props/first_eval.props
================================================
Pmax=?[F<12("goal")]
Pmax=? [ !("grass") U<=14 ("goal") ]
Pmax=? [ !("sand") U<=16 ("goal") ]
================================================
FILE: Benchmarking/prism_eval_props/second_eval.props
================================================
Pmax=?[F<15("goal")]
Pmax=?[F<13("goal")]
Pmax=? [ !("mud") U<=18 ("goal") ]
Pmax=? [ !("sand") U<=20 ("goal") ]
================================================
FILE: Benchmarking/prism_eval_props/shared_coin_eval.props
================================================
Pmax=?[F ("finished" & "c1_heads" & "c2_tails")]
Pmax=?[F ("finished" & "c1_tails" & "c2_tails")]
Pmax=?[!"five" U "finished"]
Pmax=?[!"four" U "finished"]
Pmax=?[F<40 ("finished" & "c1_heads" & "c2_tails")]
Pmax=?[F<40 ("finished" & "c1_tails" & "c2_tails")]
Pmax=?[!"five" U<40 "finished"]
Pmax=?[!"four" U<40 "finished"]
================================================
FILE: Benchmarking/prism_eval_props/slot_machine_eval.props
================================================
Pmax=? [ F ("Pr10") ]
Pmax=? [ F ("Pr2") ]
Pmax=? [ F ("Pr0") ]
Pmax=? [ X (X ("r220")) ]
Pmax=? [ X(X (X ("r122"))) ]
Pmax=? [ !(F<10 ("end")) ]
Pmax=? [ X (X (X ("r111")))&(F ("Pr0")) ]
================================================
FILE: Benchmarking/prism_eval_props/tcp_eval.props
================================================
Pmax=? [ F<5 ("crash") ]
Pmax=? [ F<11 ("crash") ]
Pmax=? [ F<17 ("crash") ]
Pmax=? [ F<23 ("crash") ]
================================================
FILE: Benchmarking/rpni_papni_memory_footrpint.py
================================================
import sys
from random import randint, random
import matplotlib.pyplot as plt
# Data
import tikzplotlib
from Benchmarking.visualize_papni_rpni import tikzplotlib_fix_ncols
from aalpy import load_automaton_from_file, run_PAPNI, run_RPNI
from aalpy.utils import generate_input_output_data_from_vpa
from aalpy.utils.BenchmarkVpaModels import get_all_VPAs
from random import seed
# def get_total_size(obj, seen=None):
# """Recursively find the size of an object and all its referenced objects."""
# if seen is None:
# seen = set()
#
# obj_id = id(obj)
# if obj_id in seen: # Avoid processing the same object multiple times
# return 0
#
# seen.add(obj_id)
# size = sys.getsizeof(obj)
#
# if isinstance(obj, dict):
# size += sum(get_total_size(k, seen) + get_total_size(v, seen) for k, v in obj.items())
# elif isinstance(obj, (list, tuple, set, frozenset)):
# size += sum(get_total_size(i, seen) for i in obj)
# elif hasattr(obj, '__dict__'): # For objects with __dict__ attribute
# size += get_total_size(vars(obj), seen)
# elif hasattr(obj, '__slots__'): # For objects with __slots__
# size += sum(get_total_size(getattr(obj, s), seen) for s in obj.__slots__ if hasattr(obj, s))
#
# return size
#
#
# def size_in_mb(obj):
# size_bytes = get_total_size(obj)
# return size_bytes / (1024 ** 2)
#
#
# #gt = load_automaton_from_file('../DotModels/arithmetics.dot', 'vpa')
# gt = get_all_VPAs()[9]
# vpa_alphabet = gt.get_input_alphabet()
#
# rpni_size = []
# papni_size = []
# for size in range(5000, 50001, 5000):
# print(size)
# data = generate_input_output_data_from_vpa(gt,
# num_sequences=size,
# max_seq_len=randint(6, 30))
#
# y = run_RPNI(data, automaton_type='dfa', print_info=False)
# x = run_PAPNI(data, vpa_alphabet, print_info=False)
#
# rpni_size.append(y)
# papni_size.append(x)
#
# print(rpni_size)
# print(papni_size)
# runtime (pta, alg) papni, rpni
# Pre-recorded runtimes as (pta construction, main algorithm) pairs in seconds,
# one entry per input size in `ticks` below.
rpni_runtime = [(0.02, 0.04), (0.06, 0.11), (0.11, 0.14), (0.11, 0.22), (0.14, 0.24), (0.12, 0.26), (0.15, 0.31), (0.26, 0.28), (0.21, 0.4), (0.25, 0.43)]
papni_runtime = [(0.0, 0.01), (0.01, 0.04), (0.02, 0.04), (0.02, 0.05), (0.02, 0.06), (0.04, 0.07), (0.02, 0.06), (0.03, 0.06), (0.06, 0.1), (0.03, 0.09)]
# size rpni papni in Mb
rpni_size = [1.8873348236083984, 3.9477672576904297, 5.673147201538086, 7.70704460144043, 9.281957626342773, 12.503767013549805, 14.622617721557617, 15.591878890991211, 18.589590072631836, 20.439626693725586]
papni_size = [0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312, 0.0034532546997070312]
# NOTE(review): the measured PAPNI sizes above are all identical; the loop
# below replaces them with a synthesized curve that grows at the same relative
# rate as the RPNI sizes — confirm this is the intended plot input.
papni_size = [papni_size[0]]
for i in range(len(rpni_runtime) - 1):
    papni_size.append(papni_size[-1] * (rpni_size[i+1]/rpni_size[i] ))
print(papni_size)
# Create subplots
fig, axes = plt.subplots(1, 2, figsize=(12, 5))
ticks = range(5000, 50001, 5000)
# Runtime plot
# Total runtime per data point = PTA construction time + algorithm time.
axes[0].plot(ticks, [x + y for x,y in rpni_runtime], label="RPNI", marker='o')
axes[0].plot(ticks, [x + y for x,y in papni_runtime], label="PAPNI", marker='s')
axes[0].set_xlabel("Input Size")
axes[0].set_ylabel("Runtime (s)")
axes[0].set_title("Runtime Comparison")
axes[0].legend()
axes[0].grid(True)
# Size plot
axes[1].plot(ticks, rpni_size, label="RPNI", marker='o')
axes[1].plot(ticks, papni_size, label="PAPNI", marker='s')
axes[1].set_xlabel("Input Size")
axes[1].set_ylabel("Size (MB)")
axes[1].set_title("Size Comparison")
axes[1].legend()
axes[1].grid(True)
# Layout adjustment
plt.tight_layout()
# plt.show()
# Work around tikzplotlib's incompatibility with newer matplotlib legend objects.
tikzplotlib_fix_ncols(fig)
# plt.show()
tikzplotlib.save("runtime_and_size_comparison.tex")
================================================
FILE: Benchmarking/stochastic_benchmarking/Benchmark_ErrorStop.py
================================================
import random
import os
import aalpy.paths
from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_stochastic_Lstar
from aalpy.oracles.RandomWordEqOracle import RandomWordEqOracle
from aalpy.utils import load_automaton_from_file, get_properties_file, get_correct_prop_values
from aalpy.utils import model_check_experiment
from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion
# Fixed seeds so each experiment repetition is reproducible.
seeds = [1212,4557,19059,468,43,654,235345,6546,76768,4563,543526,777676,5555,776767,87878787,98989,60967553,3866677,1555841,8638]
path_to_dir = '../DotModels/MDPs/'
files = ['first_grid.dot', 'second_grid.dot'] # 'slot_machine.dot' ,'shared_coin.dot' 'mqtt.dot', 'tcp.dot'
prop_folder = '../prism_eval_props/'
# TODO Change the path to your PRIMS executable and change the path_to_prism in the stop_based_on_confidence method in ModelChecking.py.
prism_executable = "/home/mtappler/Programs/prism-4.4-linux64/bin/prism"
aalpy.paths.path_to_prism = "C:/Program Files/prism-4.6/bin/prism.bat"
aalpy.paths.path_to_properties = "prism_eval_props/"
# Stochastic L* parameters; n_c/n_resample may be overridden per model below.
n_c = 20
n_resample = 1000
min_rounds = 10
max_rounds = 300
experiment_repetition = 5
uniform_parameters = False
strategy = ["normal"] # chi_square
cex_sampling = [None] # random:100:0.15
cex_processing = [None] # add a single prefix
# One benchmark directory per (strategy, cex sampling, cex processing) combo.
for strat in strategy:
    for cex_stat in cex_sampling:
        for cex_proc in cex_processing:
            print(strat, cex_stat, cex_proc)
            benchmark_dir = f'FM_mdp_smm_error_based_stop/benchmark_{strat}_{cex_stat}_{cex_proc}/'
            if not os.path.exists(benchmark_dir):
                os.makedirs(benchmark_dir)
            for seed in range(experiment_repetition):
                print(seed)
                random.seed(seeds[seed])
                text_file = open(f"{benchmark_dir}/exp_{seed}.csv", "w")
                for file in files:
                    print(file)
                    exp_name = file.split('.')[0]
                    # Choose n_c / n_resample for this model.
                    # NOTE(review): the uniform_parameters branch assigns the
                    # current values back to themselves, i.e. it is a no-op.
                    if uniform_parameters:
                        if exp_name == 'first_grid':
                            n_c, n_resample = n_c, n_resample
                        elif exp_name == 'second_grid':
                            n_c, n_resample = n_c, n_resample
                        elif exp_name == 'shared_coin':
                            n_c, n_resample = n_c, n_resample
                        elif exp_name == 'slot_machine':
                            n_c, n_resample = n_c, n_resample
                        elif exp_name == 'mqtt':
                            n_c, n_resample = n_c, n_resample
                        elif exp_name == 'tcp':
                            n_c, n_resample = n_c, n_resample
                    else:
                        if exp_name == 'first_grid':
                            n_c, n_resample = 20, 1000
                        elif exp_name == 'second_grid':
                            n_c, n_resample = 20, 2000
                        elif exp_name == 'shared_coin':
                            n_c, n_resample = 25, 2500
                        elif exp_name == 'slot_machine':
                            n_c, n_resample = 30, 5000
                        elif exp_name == 'mqtt':
                            n_c, n_resample = 20, 1000
                        elif exp_name == 'tcp':
                            n_c, n_resample = 20, 1000
                    # Stop learning once all property errors drop below 0.02.
                    stopping_data = (get_properties_file(exp_name), get_correct_prop_values(exp_name), 0.02)
                    original_mdp = load_automaton_from_file(path_to_dir + file, automaton_type='mdp')
                    input_alphabet = original_mdp.get_input_alphabet()
                    mdp_sul = AutomatonSUL(original_mdp)
                    eq_oracle = RandomWordEqOracle(input_alphabet, mdp_sul, num_walks=150, min_walk_len=5,
                                                   max_walk_len=15, reset_after_cex=True)
                    # Learn the target as an MDP.
                    learned_mdp, data_mdp = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='mdp',
                                                                 n_c=n_c, n_resample=n_resample, min_rounds=min_rounds, strategy=strat,
                                                                 max_rounds=max_rounds, return_data=True, samples_cex_strategy=cex_stat,
                                                                 print_level=1, cex_processing=cex_proc, property_based_stopping=stopping_data)
                    del mdp_sul
                    del eq_oracle
                    # Re-seed so the SMM run sees the same random stream as the MDP run.
                    random.seed(seeds[seed])
                    mdp_sul = AutomatonSUL(original_mdp)
                    eq_oracle = RandomWordEqOracle(input_alphabet, mdp_sul, num_walks=150, min_walk_len=5,
                                                   max_walk_len=15, reset_after_cex=True)
                    # Learn the same target as a stochastic Mealy machine.
                    learned_smm, data_smm = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='smm',
                                                                 n_c=n_c, n_resample=n_resample, min_rounds=min_rounds, strategy=strat,
                                                                 max_rounds=max_rounds, return_data=True, samples_cex_strategy=cex_stat,
                                                                 print_level=1, cex_processing=cex_proc, property_based_stopping=stopping_data)
                    # Convert the SMM so both models are checked with identical properties.
                    smm_2_mdp = smm_to_mdp_conversion(learned_smm)
                    mdp_results, mdp_err = model_check_experiment(get_properties_file(exp_name),
                                                                  get_correct_prop_values(exp_name), learned_mdp)
                    smm_results, smm_err = model_check_experiment(get_properties_file(exp_name),
                                                                  get_correct_prop_values(exp_name), smm_2_mdp)
                    # Build the CSV header and one row per learned model.
                    properties_string_header = ",".join([f'{key}_val,{key}_err' for key in mdp_results.keys()])
                    property_string_mdp = ",".join([f'{str(mdp_results[p])},{str(mdp_err[p])}' for p in mdp_results.keys()])
                    property_string_smm = ",".join([f'{str(smm_results[p])},{str(smm_err[p])}' for p in smm_results.keys()])
                    text_file.write('Exp_Name, n_c, n_resample, Final Hypothesis Size, Learning time,'
                                    'Eq. Query Time, Learning Rounds, #MQ Learning, # Steps Learning,'
                                    f'# MQ Eq.Queries, # Steps Eq.Queries , {properties_string_header}\n')
                    text_file.write(f'learned_mdp_{exp_name},{n_c},{n_resample}, {data_mdp["automaton_size"]}, '
                                    f'{data_mdp["learning_time"]}, {data_mdp["eq_oracle_time"]}, '
                                    f'{data_mdp["learning_rounds"]}, {data_mdp["queries_learning"]}, {data_mdp["steps_learning"]},'
                                    f'{data_mdp["queries_eq_oracle"]}, {data_mdp["steps_eq_oracle"]},'
                                    f'{property_string_mdp}\n')
                    text_file.write(f'learned_smm_{exp_name},{n_c},{n_resample}, {data_smm["automaton_size"]}, '
                                    f'{data_smm["learning_time"]}, {data_smm["eq_oracle_time"]}, '
                                    f'{data_smm["learning_rounds"]}, {data_smm["queries_learning"]}, {data_smm["steps_learning"]},'
                                    f'{data_smm["queries_eq_oracle"]}, {data_smm["steps_eq_oracle"]},'
                                    f'{property_string_smm}\n')
                    text_file.flush()
                text_file.close()
================================================
FILE: Benchmarking/stochastic_benchmarking/CompleteStochasticBenchmarking.py
================================================
import random
import time
import aalpy.paths
from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_stochastic_Lstar
from aalpy.oracles.RandomWordEqOracle import RandomWordEqOracle
from aalpy.utils import load_automaton_from_file, get_properties_file, get_correct_prop_values
from aalpy.utils import model_check_experiment
from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion
# Fixed seeds so each experiment repetition is reproducible.
seeds = [29334,1554,9430459,92344168,55451679,569315,7776892,3875261,811,51,766603,778438967,9819877,6755560,52903,5257,4635,358,1441,838]
path_to_dir = '../DotModels/MDPs/'
# files = ['first_grid.dot', 'second_grid.dot', 'slot_machine.dot', 'mqtt.dot', 'tcp.dot'] # 'slot_machine.dot' ,
# 'shared_coin.dot'
files = ['second_grid.dot', 'mqtt.dot'] # 'slot_machine.dot' ,'shared_coin.dot'
prop_folder = '../prism_eval_props/'
# TODO Change the path to your PRIMS executable
aalpy.paths.path_to_prism = "C:/Program Files/prism-4.6/bin/prism.bat"
aalpy.paths.path_to_properties = "../prism_eval_props/"
# NOTE(review): n_c / n_resample are only written into the CSV rows below; the
# run_stochastic_Lstar calls in this script do not pass them and therefore use
# the library defaults — confirm this is intended.
n_c = 10
n_resample = 1000
min_rounds = 25
max_rounds = 500
experiment_repetition = 10
uniform_parameters = False
strategy = ["normal", "chi2"] # chi_square
cex_sampling = ['bfs',] # random:100:0.15
cex_processing = [None, 'longest_prefix'] # add a single prefix
start = time.time()
# Load every benchmark model once up front, keyed by experiment name.
model_dict = {m.split('.')[0] : load_automaton_from_file(path_to_dir + m, automaton_type='mdp') for m in files}
for strat in strategy:
    for cex_stat in cex_sampling:
        for cex_proc in cex_processing:
            print(strat, cex_stat, cex_proc)
            benchmark_dir = f'FM_mdp_smm/benchmark_22_04_{strat}_{cex_proc}/'
            for seed in range(experiment_repetition):
                print(seed)
                random.seed(seeds[seed])
                import os
                if not os.path.exists(benchmark_dir):
                    os.makedirs(benchmark_dir)
                text_file = open(f"{benchmark_dir}/exp_{seed}.csv", "w")
                for file in files:
                    print(file)
                    exp_name = file.split('.')[0]
                    original_mdp = model_dict[exp_name]
                    input_alphabet = original_mdp.get_input_alphabet()
                    mdp_sul = AutomatonSUL(original_mdp)
                    eq_oracle = RandomWordEqOracle(input_alphabet, mdp_sul, num_walks=150, min_walk_len=5,
                                                   max_walk_len=16, reset_after_cex=True)
                    # Learn the target as an MDP.
                    learned_mdp, data_mdp = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='mdp',
                                                                 min_rounds=min_rounds, strategy=strat,
                                                                 max_rounds=max_rounds, return_data=True, samples_cex_strategy=cex_stat,
                                                                 print_level=1, cex_processing=cex_proc, target_unambiguity=0.99)
                    del mdp_sul
                    del eq_oracle
                    # Re-seed so the SMM run sees the same random stream as the MDP run.
                    random.seed(seeds[seed])
                    mdp_sul = AutomatonSUL(original_mdp)
                    eq_oracle = RandomWordEqOracle(input_alphabet, mdp_sul, num_walks=150, min_walk_len=5,
                                                   max_walk_len=15, reset_after_cex=True)
                    # Learn the same target as a stochastic Mealy machine.
                    learned_smm, data_smm = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='smm',
                                                                 min_rounds=min_rounds, strategy=strat,
                                                                 max_rounds=max_rounds, return_data=True, samples_cex_strategy=cex_stat,
                                                                 print_level=1, cex_processing=cex_proc, target_unambiguity=0.99)
                    # Convert the SMM so both models are checked with identical properties.
                    smm_2_mdp = smm_to_mdp_conversion(learned_smm)
                    mdp_results, mdp_err = model_check_experiment(get_properties_file(exp_name),
                                                                  get_correct_prop_values(exp_name), learned_mdp)
                    smm_results, smm_err = model_check_experiment(get_properties_file(exp_name),
                                                                  get_correct_prop_values(exp_name), smm_2_mdp)
                    # Build the CSV header and one row per learned model.
                    properties_string_header = ",".join([f'{key}_val,{key}_err' for key in mdp_results.keys()])
                    property_string_mdp = ",".join([f'{str(mdp_results[p])},{str(mdp_err[p])}' for p in mdp_results.keys()])
                    property_string_smm = ",".join([f'{str(smm_results[p])},{str(smm_err[p])}' for p in smm_results.keys()])
                    text_file.write('Exp_Name,n_c,n_resample,Final Hypothesis Size,Learning time,'
                                    'Eq. Query Time,Learning Rounds,#MQ Learning,# Steps Learning,'
                                    f'# MQ Eq.Queries,# Steps Eq.Queries ,{properties_string_header}\n')
                    text_file.write(f'learned_mdp_{exp_name},{n_c},{n_resample},{data_mdp["automaton_size"]},'
                                    f'{data_mdp["learning_time"]},{data_mdp["eq_oracle_time"]},'
                                    f'{data_mdp["learning_rounds"]},{data_mdp["queries_learning"]},{data_mdp["steps_learning"]},'
                                    f'{data_mdp["queries_eq_oracle"]},{data_mdp["steps_eq_oracle"]},'
                                    f'{property_string_mdp}\n')
                    text_file.write(f'learned_smm_{exp_name},{n_c},{n_resample},{data_smm["automaton_size"]},'
                                    f'{data_smm["learning_time"]},{data_smm["eq_oracle_time"]}, '
                                    f'{data_smm["learning_rounds"]},{data_smm["queries_learning"]},{data_smm["steps_learning"]},'
                                    f'{data_smm["queries_eq_oracle"]},{data_smm["steps_eq_oracle"]},'
                                    f'{property_string_smm}\n')
                    text_file.flush()
                text_file.close()
print('Exp duration', time.time() - start)
================================================
FILE: Benchmarking/stochastic_benchmarking/StochasticBenchmarkingWPrism.py
================================================
import random
import aalpy.paths
from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_stochastic_Lstar
from aalpy.oracles.RandomWalkEqOracle import RandomWalkEqOracle
from aalpy.utils import load_automaton_from_file, get_correct_prop_values, get_properties_file
from aalpy.utils import model_check_experiment
from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion
path_to_dir = '../DotModels/MDPs/'
files = ['first_grid.dot', 'second_grid.dot',
         'slot_machine.dot', 'mqtt.dot', 'tcp.dot'] # 'shared_coin.dot'
aalpy.paths.path_to_prism = "C:/Program Files/prism-4.6/bin/prism.bat"
aalpy.paths.path_to_properties = "../prism_eval_props/"
# Default stochastic L* parameters (possibly tuned per model below).
n_c = 20
n_resample = 1000
min_rounds = 10
max_rounds = 1000
strategy = "normal"
for seed in range(1, 4):
    random.seed(seed)
    benchmark_dir = f"benchmark_complete_no_cq/benchmark_data_{seed}"
    import os
    if not os.path.exists(benchmark_dir):
        os.makedirs(benchmark_dir)
    text_file = open(f"{benchmark_dir}/StochasticExperiments.csv", "w")
    uniform_parameters = True
    for file in files:
        exp_name = file.split('.')[0]
        # NOTE(review): with uniform_parameters=True the first branch assigns
        # the current values back to themselves (a no-op), so the per-model
        # tuning in the else branch is never used.
        if uniform_parameters:
            if exp_name == 'first_grid':
                n_c, n_resample = n_c, n_resample
            elif exp_name == 'second_grid':
                n_c, n_resample = n_c, n_resample
            elif exp_name == 'shared_coin':
                n_c, n_resample = n_c, n_resample
            elif exp_name == 'slot_machine':
                n_c, n_resample = n_c, n_resample
            elif exp_name == 'mqtt':
                n_c, n_resample = n_c, n_resample
            elif exp_name == 'tcp':
                n_c, n_resample = n_c, n_resample
        else:
            if exp_name == 'first_grid':
                n_c, n_resample = 20, 1000
            elif exp_name == 'second_grid':
                n_c, n_resample = 20, 1000
            elif exp_name == 'shared_coin':
                n_c, n_resample = 30, 3000
            elif exp_name == 'slot_machine':
                n_c, n_resample = 40, 5000
            elif exp_name == 'mqtt':
                n_c, n_resample = 20, 1000
            elif exp_name == 'tcp':
                n_c, n_resample = 20, 1000
        original_mdp = load_automaton_from_file(path_to_dir + file, automaton_type='mdp')
        input_alphabet = original_mdp.get_input_alphabet()
        mdp_sul = AutomatonSUL(original_mdp)
        # Random-walk oracle; with reset_prob=0.25 the expected walk length is 4 steps.
        eq_oracle = RandomWalkEqOracle(input_alphabet, mdp_sul, num_steps=n_resample * (1 / 0.25),
                                       reset_after_cex=True, reset_prob=0.25)
        # Learn the target as an MDP.
        learned_mdp, data_mdp = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='mdp',
                                                     n_c=n_c, n_resample=n_resample, min_rounds=min_rounds, strategy=strategy,
                                                     max_rounds=max_rounds, return_data=True, samples_cex_strategy="bfs")
        # Reset SUL statistics before the SMM run (the same SUL and oracle are reused).
        mdp_sul.num_steps = 0
        mdp_sul.num_queries = 0
        # Learn the same target as a stochastic Mealy machine.
        learned_smm, data_smm = run_stochastic_Lstar(input_alphabet, mdp_sul, eq_oracle, automaton_type='smm',
                                                     n_c=n_c, n_resample=n_resample, min_rounds=min_rounds, strategy=strategy,
                                                     max_rounds=max_rounds, return_data=True, samples_cex_strategy="bfs")
        # Convert the SMM so both models are checked with identical properties.
        smm_2_mdp = smm_to_mdp_conversion(learned_smm)
        mdp_results, mdp_err = model_check_experiment(get_properties_file(exp_name),
                                                      get_correct_prop_values(exp_name), learned_mdp)
        smm_results, smm_err = model_check_experiment(get_properties_file(exp_name),
                                                      get_correct_prop_values(exp_name), smm_2_mdp)
        # Build the CSV header and one row per learned model.
        properties_string_header = ",".join([f'{key}_val,{key}_err' for key in mdp_results.keys()])
        property_string_mdp = ",".join([f'{str(mdp_results[p])},{str(mdp_err[p])}' for p in mdp_results.keys()])
        property_string_smm = ",".join([f'{str(smm_results[p])},{str(smm_err[p])}' for p in smm_results.keys()])
        text_file.write('Exp_Name, n_c, n_resample, Final Hypothesis Size, Learning time,'
                        'Eq. Query Time, Learning Rounds, #MQ Learning, # Steps Learning,'
                        f'# MQ Eq.Queries, # Steps Eq.Queries , {properties_string_header}\n')
        text_file.write(f'learned_mdp_{exp_name},{n_c},{n_resample}, {data_mdp["automaton_size"]}, '
                        f'{data_mdp["learning_time"]}, {data_mdp["eq_oracle_time"]}, '
                        f'{data_mdp["learning_rounds"]}, {data_mdp["queries_learning"]}, {data_mdp["steps_learning"]},'
                        f'{data_mdp["queries_eq_oracle"]}, {data_mdp["steps_eq_oracle"]},'
                        f'{property_string_mdp}\n')
        text_file.write(f'learned_smm_{exp_name},{n_c},{n_resample}, {data_smm["automaton_size"]}, '
                        f'{data_smm["learning_time"]}, {data_smm["eq_oracle_time"]}, '
                        f'{data_smm["learning_rounds"]}, {data_smm["queries_learning"]}, {data_smm["steps_learning"]},'
                        f'{data_smm["queries_eq_oracle"]}, {data_smm["steps_eq_oracle"]},'
                        f'{property_string_smm}\n')
        text_file.flush()
    text_file.close()
================================================
FILE: Benchmarking/stochastic_benchmarking/passive_mdp_vs_smm.py
================================================
import random
import os
import aalpy.paths
from aalpy.SULs import AutomatonSUL
from aalpy.automata.StochasticMealyMachine import smm_to_mdp_conversion
from aalpy.learning_algs import run_Alergia, run_JAlergia
from aalpy.utils import load_automaton_from_file, get_correct_prop_values, get_properties_file, visualize_automaton
from aalpy.utils import model_check_experiment
path_to_dir = '../DotModels/MDPs/'
files = ['first_grid.dot', 'second_grid.dot',
'slot_machine.dot', 'mqtt.dot', 'tcp.dot'] # 'shared_coin.dot'
aalpy.paths.path_to_prism = "C:/Program Files/prism-4.7/bin/prism.bat"
aalpy.paths.path_to_properties = "../prism_eval_props/"
def writeSamplesToFile(samples, path="alergiaSamples.txt"):
    """Append Alergia-style traces to a text file, one comma-separated trace per line.

    MDP traces look like ``[initial_output, (i, o), ...]`` and are written as
    ``initial_output,i1,o1,i2,o2,...``; SMM traces are plain lists of
    ``(input, output)`` tuples and are written with a leading comma,
    ``,i1,o1,...`` (unchanged from the original format).

    Fixes over the original: the redundant ``f.close()`` inside the ``with``
    block is gone, the caller's sample lists are no longer mutated (the
    original ``sample.pop(0)`` destroyed the initial output of every MDP
    trace), and the line is assembled with ``','.join`` instead of repeated
    string concatenation.

    :param samples: non-empty list of traces (MDP- or SMM-style, see above)
    :param path: file to append to (created if missing)
    """
    # SMM traces start directly with (input, output) tuples; MDP traces
    # start with a bare initial-output symbol.
    isSMM = isinstance(samples[0][0], tuple)
    with open(path, 'a') as f:
        for sample in samples:
            if isSMM:
                prefix, pairs = "", sample
            else:
                prefix, pairs = str(sample[0]), sample[1:]
            parts = [prefix] + [f'{i},{o}' for i, o in pairs]
            f.write(','.join(parts) + '\n')
    # samples.clear()
def deleteSampleFile(path="alergiaSamples.txt"):
    """Delete the sample file at *path*; a missing file is silently ignored."""
    import os
    if os.path.exists(path):
        os.remove(path)
    return None
# Number of random traces sampled from the ground-truth MDP for passive learning.
num_traces = 100000
for file in ['first_grid.dot']:
    exp_name = file.split('.')[0]
    # Load the ground-truth model and wrap it as a SUL for trace sampling.
    original_mdp = load_automaton_from_file(path_to_dir + file, automaton_type='mdp')
    input_alphabet = original_mdp.get_input_alphabet()
    mdp_sul = AutomatonSUL(original_mdp)
    for _ in range(1):
        data = []
        for _ in range(num_traces):
            # MDP-style trace: initial output followed by (input, output) pairs.
            sample = [mdp_sul.pre()]
            for _ in range(random.randint(10, 50)):
                i = random.choice(input_alphabet)
                o = mdp_sul.step(i)
                sample.append((i, o))
            data.append(sample)
            mdp_sul.post()
        # Learn an MDP from the sampled traces.
        learned_mdp = run_Alergia(data, automaton_type='mdp')
        # Dropping the leading initial output turns the same traces into
        # SMM-style data (lists of (input, output) pairs only).
        for s in data:
            s.pop(0)
        learned_smm = run_Alergia(data, automaton_type='smm')
        # Convert the SMM to an MDP so both models can be checked against the
        # same PRISM properties.
        smm_2_mdp = smm_to_mdp_conversion(learned_smm)
        mdp_results, mdp_err = model_check_experiment(get_properties_file(exp_name),
                                                      get_correct_prop_values(exp_name), learned_mdp)
        smm_results, smm_err = model_check_experiment(get_properties_file(exp_name),
                                                      get_correct_prop_values(exp_name), smm_2_mdp)
        print(learned_mdp.size, learned_smm.size, smm_2_mdp.size)
        print(f'-------{exp_name}---------')
        print(f'MDP Error: {mdp_err}')
        print(f'SMM Error: {smm_err}')
        # Per-property error difference SMM - MDP; properties missing from the
        # SMM results or with zero SMM error are skipped.
        smm_diff = {}
        for key, val in mdp_err.items():
            if key not in smm_err.keys() or smm_err[key] == 0:
                continue
            smm_diff[key] = round(smm_err[key] - val, 2)
        print(f'SMM differance: {smm_diff}')
================================================
FILE: Benchmarking/stochastic_benchmarking/plot_error_steps.py
================================================
import matplotlib.pyplot as plt
from aalpy.utils import load_automaton_from_file
from aalpy.utils import statistical_model_checking
# Ground-truth Bluetooth MDP used by the (commented-out) statistical checks below.
model = load_automaton_from_file('../../DotModels/MDPs/bluetooth.dot', automaton_type='mdp')
# Step bounds shared by all plots in this module (x-axis values).
steps = [3, 5, 8, 11, 14, 17, 20]
# statistical_tests= []
# for s in steps:
#     e = statistical_model_checking(model, {'no_response'}, s)
#     statistical_tests.append(e)
#
# print(statistical_tests)
def crash_plot():
    """Plot the probability of reaching 'crash' within each step bound for the
    ground truth, the learned SMM/MDP models, and a random-input baseline."""
    correct_values = [0, 0.16800000000000004, 0.3926480000000001, 0.5572338000000001, 0.6772233874640001,
                      0.7646958490393682, 0.8284632739463244]
    random_inputs = [0, 0.001981749035076958, 0.0281691469985939, 0.03959723310087103, 0.046344616720299714,
                     0.04758085064218106, 0.05136504760916134]
    # smm learning took 34k queries
    smm_values = [0, 0.174, 0.4046, 0.5714, 0.6915, 0.778, 0.8402, ]
    # mdp learning 180k queries
    mdp_values = [0, 0.1899, 0.4624, 0.6471, 0.7684, 0.848, 0.9002, ]
    # Same drawing order as before: correct, SMM, MDP, random baseline.
    for values, label in ((correct_values, 'Correct Values'), (smm_values, 'SMM Values'),
                          (mdp_values, 'MDP Values'), (random_inputs, 'Random Inputs')):
        plt.plot(steps, values, label=label)
    plt.xticks(steps)
    plt.grid()
    plt.legend()
    plt.show()
def no_response():
    """Plot the probability of observing 'no_response' within each step bound
    for the ground truth, the learned models, and a random-input baseline."""
    ground_truth = [0.36000000000000004, 0.5904, 0.7902848, 0.8926258176000001,
                    0.9450244186112, 0.9718525023289344, 0.9855884811924145]
    mdp_values = [0.3993, 0.6407, 0.8315, 0.921, 0.9629, 0.9826, 0.9918]
    smm_values = [0.3652, 0.5966, 0.7956, 0.8965, 0.9476, 0.9734, 0.9865]
    random_baseline = [0.29130767125614576, 0.30205630054639654, 0.3069446148329197, 0.31200279332244946,
                       0.3154095142827484, 0.3145318825672143, 0.3125784442326385]
    # Same drawing order as before: correct, SMM, MDP, random baseline.
    for values, label in ((ground_truth, 'Correct Values'), (smm_values, 'SMM Values'),
                          (mdp_values, 'MDP Values'), (random_baseline, 'Random Inputs')):
        plt.plot(steps, values, label=label)
    plt.xticks(steps)
    plt.grid()
    plt.legend()
    plt.show()
def side_by_side():
    """Plot the 'crash' (left) and 'no_response' (right) property values as a
    single two-panel figure.

    Bug fix: the original body called the non-existent Axes method
    ``plot_side_by_side()``, which raised AttributeError before anything was
    drawn. Judging by the sibling ``crash_plot()`` and ``no_response()``
    functions, the intent was to draw the four data series on each subplot,
    which is what this version does.
    """
    original_values_crash = [0, 0.16800000000000004, 0.3926480000000001, 0.5572338000000001, 0.6772233874640001,
                             0.7646958490393682, 0.8284632739463244]
    random_input_crash = [0, 0.001981749035076958, 0.0281691469985939, 0.03959723310087103, 0.046344616720299714,
                          0.04758085064218106, 0.05136504760916134]
    # smm learning took 34k queries
    smm_crash_property = [0, 0.174, 0.4046, 0.5714, 0.6915, 0.778, 0.8402, ]
    # mdp learning 180k queries
    mdp_crash_property = [0, 0.1899, 0.4624, 0.6471, 0.7684, 0.848, 0.9002, ]
    original_values_no_response = [0.36000000000000004, 0.5904, 0.7902848, 0.8926258176000001,
                                   0.9450244186112, 0.9718525023289344, 0.9855884811924145]
    mdp_no_response = [0.3993, 0.6407, 0.8315, 0.921, 0.9629, 0.9826, 0.9918]
    smm_no_response = [0.3652, 0.5966, 0.7956, 0.8965, 0.9476, 0.9734, 0.9865]
    random_input_no_response = [0.29130767125614576, 0.30205630054639654, 0.3069446148329197, 0.31200279332244946,
                                0.3154095142827484, 0.3145318825672143, 0.3125784442326385]
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(5, 3))
    # Left panel: 'crash' property (same series/order as crash_plot()).
    axes[0].plot(steps, original_values_crash, label='Correct Values')
    axes[0].plot(steps, smm_crash_property, label='SMM Values')
    axes[0].plot(steps, mdp_crash_property, label='MDP Values')
    axes[0].plot(steps, random_input_crash, label='Random Inputs')
    axes[0].set_xlabel('Steps to \'crash\'')
    axes[0].set_ylabel('Property Value')
    # Right panel: 'no_response' property (same series/order as no_response()).
    axes[1].plot(steps, original_values_no_response, label='Correct Values')
    axes[1].plot(steps, smm_no_response, label='SMM Values')
    axes[1].plot(steps, mdp_no_response, label='MDP Values')
    axes[1].plot(steps, random_input_no_response, label='Random Inputs')
    axes[1].set_xlabel('Steps to \'no_response\'')
    axes[1].set_ylabel('Property Value')
    axes[0].set_xticks(steps)
    axes[1].set_xticks(steps)
    axes[0].grid()
    axes[1].grid()
    axes[0].legend()
    fig.tight_layout()
    plt.show()
# import tikzplotlib
# tikzplotlib.save("properties_over_time.tex")
# Entry point: only the 'crash' plot is rendered by default.
crash_plot()
================================================
FILE: Benchmarking/stochastic_benchmarking/stochastic_benchmark_random_automata.py
================================================
from itertools import product
from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_stochastic_Lstar
from aalpy.oracles import RandomWordEqOracle
from aalpy.utils import generate_random_mdp, generate_random_smm
# Benchmark grid: automaton sizes and candidate input/output alphabet sizes.
automata_size = [5, 10, 15, 20, 30, 50, ]
inputs_size = [2, 3, 5, 7, 9]
outputs_size = [2, 5, 10, 15, 20]
# The full grids above are immediately narrowed: only the 7-input /
# 15-output configuration is active.
inputs_size = [7]
outputs_size = [15]
def learn(mdp, type):
    """Learn *mdp* with stochastic L* as the requested automaton *type*
    ('mdp' or 'smm') and return the (model, statistics) pair."""
    alphabet = mdp.get_input_alphabet()
    system_under_learning = AutomatonSUL(mdp)
    oracle = RandomWordEqOracle(alphabet, system_under_learning,
                                num_walks=1000, min_walk_len=4, max_walk_len=20)
    result = run_stochastic_Lstar(alphabet, system_under_learning, oracle,
                                  automaton_type=type, cex_processing=None,
                                  print_level=0, return_data=True)
    return result
num_queries_mdp = []
num_queries_smm = []
# i = 0
# for p in product(automata_size, inputs_size, outputs_size):
#     num_states, num_inputs, num_outputs = p
#     if num_inputs > num_outputs:
#         continue
#
#     print(i)
#     i += 1
#
#     # random_mdp = generate_random_mdp(num_states=num_states, input_size=num_inputs, output_size=num_outputs)
#     random_smm = generate_random_mdp(num_states=num_states, input_size=num_inputs, output_size=num_outputs)
#     # random_smm = random_smm.to_mdp()
#
#     _, mdp_data = learn(random_smm, 'mdp')
#     _, smm_data = learn(random_smm, 'smm')
#
#     num_queries_mdp.append(mdp_data['queries_learning'] + mdp_data['queries_eq_oracle'])
#     num_queries_smm.append(smm_data['queries_learning'] + smm_data['queries_eq_oracle'])
print(num_queries_mdp)
print(num_queries_smm)
# Pre-recorded query counts; the _I_O suffix gives input/output alphabet sizes
# and each list holds one entry per automaton size in `automata_size`.
num_queries_mdp_3_7 = [77115, 85440, 36326, 132485, 250055, 343526]
num_queries_smm_3_7 = [23511, 14287, 17106, 55482, 50935, 99730]
num_queries_mdp_4_10 = [54654, 265240, 245245, 238944, 320026, 1170086]
num_queries_smm_4_10 = [7122, 42637, 32431, 51821, 75703, 204150]
num_queries_mdp_7_15 = [237731, 397386, 924637, 2066456, 4117725, 4774201]
num_queries_smm_7_15 = [15733, 19148, 52214, 106436, 157414, 605491]
# mdp was used for data gen
mdp_base_num_queries_mdp_3_7 = [6515, 13659, 42904, 31798, 129641, 128275]
mdp_base_num_queries_smm_3_7 = [7383, 16985, 55428, 78679, 230936, 479493]
mdp_base_num_queries_mdp_4_10 = [10110, 14032, 61815, 35108, 61489, 115270]
mdp_base_num_queries_smm_4_10 = [8284, 11257, 38399, 49637, 74183, 145063]
mdp_base_num_queries_mdp_7_15 = [7611, 16438, 12564, 33355, 76704, 348364]
mdp_base_num_queries_smm_7_15 = [12132, 29568, 36804, 60763, 95613, 348675]
pairs_smm_base = [(num_queries_mdp_3_7, num_queries_smm_3_7), (num_queries_mdp_4_10, num_queries_smm_4_10),
                  (num_queries_mdp_7_15, num_queries_smm_7_15)]
pairs_mdp_base = [(mdp_base_num_queries_mdp_3_7, mdp_base_num_queries_smm_3_7),
                  (mdp_base_num_queries_mdp_4_10, mdp_base_num_queries_smm_4_10),
                  (mdp_base_num_queries_mdp_7_15, mdp_base_num_queries_smm_7_15)]
#
# Percentage of queries saved by SMM learning relative to MDP learning.
# NOTE(review): only pairs_mdp_base is processed here; pairs_smm_base is
# defined but unused in this loop.
for mdp, smm in pairs_mdp_base:
    save = []
    for m, s in zip(mdp, smm):
        save.append(100 - round(s / m * 100, 2))
    print(save)
# Pre-recorded savings percentages (SMM as the data-generation base).
smm_save_3_7 = [69.51, 83.28, 52.91, 58.12, 79.63, 70.97]
smm_save_4_10 = [86.97, 83.93, 86.78, 78.31, 76.34, 82.55]
smm_save_7_15 = [93.38, 95.18, 94.35, 94.85, 96.18, 87.32]
#
# def plot_queries_smm_as_base():
#     import matplotlib.pyplot as plt
#
#     plt.plot(automata_size, smm_save_3_7, label='I:3,O: 7)')
#     plt.plot(automata_size, smm_save_4_10, label='I:4,O: 10')
#     plt.plot(automata_size, smm_save_7_15, label='I:7,O: 15')
#
#     plt.xticks(automata_size)
#
#     plt.grid()
#     plt.legend()
#     plt.show()
# plot_queries_smm_as_base()
================================================
FILE: Benchmarking/stochastic_benchmarking/strategy_comp.py
================================================
from statistics import mean
from aalpy.learning_algs.stochastic.DifferenceChecker import AdvancedHoeffdingChecker, HoeffdingChecker
import aalpy.paths
from aalpy.SULs import AutomatonSUL
from aalpy.learning_algs import run_stochastic_Lstar
from aalpy.oracles import RandomWordEqOracle
from aalpy.utils import load_automaton_from_file, model_check_experiment, get_properties_file, get_correct_prop_values
# PRISM model checker binary and property files used by model_check_experiment.
aalpy.paths.path_to_prism = "C:/Program Files/prism-4.6/bin/prism.bat"
aalpy.paths.path_to_properties = "../../Benchmarking/prism_eval_props/"
# Benchmark under test; also selects the property file and ground-truth values.
example = 'mqtt'
mdp = load_automaton_from_file(f'../../DotModels/MDPs/{example}.dot', automaton_type='mdp')
# Difference-checking strategies to compare during stochastic L*.
strategies = [AdvancedHoeffdingChecker(alpha=0.001), 'chi2']
def learn(strategy):
    """Run stochastic L* on the module-level `mdp` with the given difference-checking
    strategy and return (total number of queries, average property-check error)."""
    alphabet = mdp.get_input_alphabet()
    sul = AutomatonSUL(mdp)
    oracle = RandomWordEqOracle(alphabet, sul, num_walks=1000, min_walk_len=4, max_walk_len=20)

    learned_model, info = run_stochastic_Lstar(alphabet, sul, oracle, automaton_type='smm', strategy=strategy,
                                               cex_processing=None, print_level=0, return_data=True)

    total_queries = info['queries_learning'] + info['queries_eq_oracle']
    # Model-check the learned model (converted to an MDP) against the known
    # correct property values; the error is the mean absolute difference.
    _, prop_diffs = model_check_experiment(get_properties_file(example), get_correct_prop_values(example),
                                           learned_model.to_mdp())
    return total_queries, mean(prop_diffs.values())
# strategies = [HoeffdingChecker(alpha=0.001)]
# for s in strategies:
# plot_points = []
# for _ in range(10):
# plot_points.append(learn(s))
# print(s)
# print(plot_points)
# mqtt
normal_data_mqtt = [(38482, 0.01964), (31372, 0.01652), (31170, 0.00624), (31590, 0.01738), (30987, 0.01796),
(46244, 0.0222), (48533, 0.01496), (31979, 0.02714), (33219, 0.00758), (32295, 0.015560000000000001)]
chi2_data_mqtt = [(155242, 0.01054), (225776, 0.00348), (230385, 0.01838), (70482, 0.01374), (85096, 0.01284),
(64644, 0.0354), (68898, 0.03556), (61591, 0.02464), (61773, 0.0067), (35876, 0.01562)]
#
# bluetooth
normal_data_bt = [(51490, 0.011823076923076924), (66996, 0.015015384615384616), (96563, 0.003523076923076923),
(41964, 0.030623076923076923), (40395, 0.026623076923076923), (62838, 0.012146153846153846),
(123363, 0.03417692307692308), (100228, 0.029423076923076923), (51425, 0.009438461538461538),
(67885, 0.0241)]
chi2_data_bt = [(19523, 0.009261538461538462), (20467, 0.038446153846153845), (34288, 0.058253846153846156),
(30030, 0.03462307692307692), (18773, 0.038215384615384616), (16949, 0.04113846153846154),
(18195, 0.03705384615384615), (19437, 0.030615384615384614), (16834, 0.0487), (16699, 0.09320769230769231)]
def plot(dataset='mqtt'):
    """Scatter-plot number of queries vs. average error for both strategies.

    dataset: 'mqtt' (default) or 'bluetooth' — selects which measurement
    lists are plotted. Bug fix: the original always plotted the MQTT data
    while titling the figure 'Bluetooth' and writing
    'bluetooth_strategy_comp.tex'; title and output file now follow the
    selected dataset.
    """
    from matplotlib import pyplot as plt

    if dataset == 'mqtt':
        normal_data, chi2_data, title = normal_data_mqtt, chi2_data_mqtt, 'MQTT'
    else:
        normal_data, chi2_data, title = normal_data_bt, chi2_data_bt, 'Bluetooth'

    # x-axis in thousands of queries, y-axis is the average model-checking error.
    normal_x = [p[0] // 1000 for p in normal_data]
    normal_y = [p[1] for p in normal_data]
    chi2_x = [p[0] // 1000 for p in chi2_data]
    chi2_y = [p[1] for p in chi2_data]

    plt.figure()
    plt.scatter(normal_x, normal_y, label='Hoeffding')
    plt.scatter(chi2_x, chi2_y, label='Chi2')
    plt.title(title)
    plt.xlabel('Number of Queries (in thousands)')
    plt.ylabel('Average Error')
    plt.legend()
    plt.grid()
    #plt.show()
    import tikzplotlib
    tikzplotlib.save(f'{dataset}_strategy_comp.tex')
def plot_side_by_side():
    """Show the MQTT and Bluetooth strategy comparisons as two side-by-side
    scatter plots (queries in thousands vs. average error)."""
    from matplotlib import pyplot as plt

    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 3))

    panels = [(axes[0], 'MQTT', normal_data_mqtt, chi2_data_mqtt),
              (axes[1], 'Bluetooth', normal_data_bt, chi2_data_bt)]
    for ax, title, hoeffding_points, chi2_points in panels:
        ax.scatter([q // 1000 for q, _ in hoeffding_points],
                   [err for _, err in hoeffding_points], label='Hoeffding')
        ax.scatter([q // 1000 for q, _ in chi2_points],
                   [err for _, err in chi2_points], label='Chi2')
        ax.set_title(title)
        ax.set_xlabel('Number of Queries (in thousands)')
        ax.legend()
        ax.grid()

    # The shared y-axis label only appears on the left panel.
    axes[0].set_ylabel('Average Error')

    fig.tight_layout()
    plt.show()

    import tikzplotlib
    # tikzplotlib.save('strategy_comp_side_by_side.tex')
plot_side_by_side()
================================================
FILE: Benchmarking/stochastic_benchmarking/unamb_error_plot.py
================================================
# Each triple of lists below records one value per learning round:
#   *_max_err — maximum property error (in percent),
#   *_avr_err — average property error (in percent),
#   *_unamb   — percentage of unambiguous traces.
import matplotlib.pyplot as plt
import matplotlib
mqtt_max_err_1 = [100, 100, 100, 100, 100, 100, 100, 12.25, 7.92, 3.87, 3.99, 6.0, 4.52, 2.99, 3.87, 4.29, 4.7, 4.26,
4.62, 6.41, 4.23, 3.38, 2.79, 2.2, 2.94, 3.04, 2.28, 2.11, 2.2, 2.45, 2.35, 2.12, 2.66, 2.27, 2.23,
2.21, 1.94, 1.82, 1.61, 1.45, 1.41, 1.83, 1.43, 1.42, 1.44, 1.41, 1.37, 1.31, 1.33, 1.3, 1.29, 1.27,
1.47, 1.78, 1.37, 1.28, 1.94, 1.64, 3.48, 3.11, 4.37, 3.93, 3.81, 4.28, 3.99, 4.25, 4.75, 5.14, 5.03,
4.72, 4.74, 4.42, 4.07, 3.72, 3.55, 3.21, 2.99, 2.84, 2.53, 2.75, 2.62, 2.26, 1.78, 1.9, 2.01, 2.09,
2.33, 2.63, 2.75, 2.53, 2.15, 2.48, 3.36, 3.03, 2.87, 3.02, 3.0, 3.04, 2.79, 2.84, 2.67, 3.11, 2.8,
2.52, 2.28, 2.0, 1.81, 1.7, 1.48, 1.74, 1.55, 1.64, 1.46, 1.23, 1.08, 1.26, 1.47, 1.65, 1.56, 1.76,
1.91, 1.97, 2.01, 1.94, 1.7, 1.65, 1.4, 1.15, 1.35, 1.29, 1.49, 1.76, 1.61, 1.66, 1.72, 1.9, 2.14,
1.96, 1.65, 1.56, 1.92, 1.77, 1.66, 1.37, 1.44, 1.41, 1.58, 1.33, 1.18, 1.28, 1.38, 1.17, 1.14, 1.25,
1.4, 1.22, 1.08, 1.09, 1.34, 1.2, 1.09, 1.19, 1.06, 1.07, 1.07, 1.06, 1.06, 1.06, 1.05, 1.05, 1.05,
1.05, 1.05, 1.04, 1.03, 1.02, 1.03, 1.03, 1.05, 1.04, 1.04, 1.04, 1.05, 1.05, 1.06, 1.06, 1.07, 1.07,
1.07, 1.08, 1.07, 1.08, 1.07, 1.06, 1.06, 1.06, 1.06, 1.05, 1.05, 1.03, 1.03, 1.02, 1.14, 1.16, 1.36,
1.18, 1.26, 1.16, 1.35, 1.52, 1.41, 1.46, 1.4, 1.47, 1.59, 1.65, 1.73, 1.8, 1.72, 1.66, 1.65, 1.75,
1.69, 1.72, 1.56, 1.6, 1.7, 1.62, 1.62, 1.61, 1.65, 1.86, 2.01, 1.93, 2.16, 2.11, 2.03, 1.93, 1.85,
1.78, 1.66, 1.67, 1.64, 1.76, 1.77, 1.67, 1.55, 1.59, 1.51, 1.46]
mqtt_avr_err_1 = [100, 100, 100, 100, 100, 100, 100, 9.232, 4.548, 2.572, 2.592, 3.512, 2.89, 2.27, 2.606, 2.616, 2.754,
2.502, 2.6879999999999997, 3.42, 2.404, 2.148, 1.97, 1.672, 2.074, 2.084, 1.724, 1.472, 1.2, 1.066,
1.292, 1.348, 1.596, 1.506, 1.408, 0.91, 1.1219999999999999, 1.178, 1.276, 0.994, 0.918,
1.3519999999999999, 1.158, 1.068, 0.946, 0.708, 0.644, 0.56, 0.586, 0.562, 0.582, 0.764, 1.084, 1.242,
1.08, 1.056, 1.322, 1.184, 1.998, 1.8299999999999998, 2.394, 2.212, 2.112, 2.338, 2.206, 2.322,
2.4859999999999998, 2.634, 2.56, 2.4419999999999997, 2.412, 2.306, 2.166, 2.04, 1.936, 1.754, 1.658,
1.574, 1.428, 1.522, 1.476, 1.3559999999999999, 1.158, 1.19, 1.242, 1.294, 1.376, 1.474, 1.528, 1.432,
1.268, 1.3960000000000001, 1.786, 1.656, 1.616, 1.654, 1.6380000000000001, 1.64, 1.538, 1.556, 1.482,
1.682, 1.534, 1.4220000000000002, 1.318, 1.184, 1.104, 1.064, 0.98, 1.084, 0.994, 1.028,
0.9460000000000001, 0.822, 0.704, 0.832, 0.9440000000000001, 1.026, 0.99, 1.068, 1.134, 1.17, 1.182,
1.144, 1.03, 1.014, 0.912, 0.786, 0.872, 0.848, 0.942, 1.066, 0.992, 1.016, 1.048, 1.1280000000000001,
1.238, 1.164, 1.022, 0.984, 1.146, 1.098, 1.064, 0.934, 0.96, 0.954, 1.026, 0.906, 0.854, 0.928,
0.982, 0.896, 0.882, 0.9359999999999999, 1.008, 0.926, 0.8360000000000001, 0.866, 0.992, 0.922, 0.872,
0.9179999999999999, 0.8460000000000001, 0.796, 0.812, 0.868, 0.852, 0.734, 0.744, 0.702, 0.668, 0.654,
0.762, 0.654, 0.5780000000000001, 0.56, 0.558, 0.526, 0.47800000000000004, 0.462, 0.64, 0.612,
0.5640000000000001, 0.5720000000000001, 0.63, 0.624, 0.588, 0.716, 0.736, 0.6940000000000001, 0.602,
0.578, 0.542, 0.528, 0.554, 0.476, 0.542, 0.704, 0.722, 0.744, 0.728, 0.806, 0.87, 0.868, 0.958,
0.876, 0.918, 0.884, 0.97, 1.052, 1.0, 1.038, 1.016, 1.042, 1.094, 1.116, 1.168, 1.214, 1.19, 1.172,
1.188, 1.224, 1.206, 1.214, 1.1340000000000001, 1.1660000000000001, 1.196, 1.172, 1.178, 1.174, 1.186,
1.276, 1.3459999999999999, 1.318, 1.422, 1.4, 1.3699999999999999, 1.324, 1.296, 1.268, 1.216, 1.226,
1.222, 1.274, 1.274, 1.204, 1.1540000000000001, 1.1640000000000001, 1.1280000000000001,
1.1059999999999999]
mqtt_unamb_1 = [83.33, 68.57, 70.21, 72.06, 73.42, 77.42, 85.22, 86.86, 90.74, 95.11, 97.28, 97.28, 97.28, 97.96, 99.46,
78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57,
78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 79.08, 79.08, 79.08, 79.08, 79.08, 79.08, 79.08, 79.08, 79.08,
79.08, 79.08, 79.08, 79.08, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 79.59, 79.59,
79.59, 79.08, 78.57, 78.57, 79.59, 79.59, 79.59, 80.61, 80.61, 80.61, 80.61, 80.61, 80.61, 80.61, 80.61,
80.61, 80.61, 80.61, 80.61, 80.61, 80.61, 80.61, 80.61, 80.61, 80.61, 80.61, 80.61, 80.61, 80.61, 80.61,
80.61, 80.61, 81.12, 81.12, 81.12, 81.12, 81.12, 80.61, 80.61, 81.12, 81.63, 81.63, 81.63, 81.63, 81.63,
81.12, 81.12, 80.61, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12,
81.12, 81.12, 81.12, 81.12, 81.12, 80.61, 80.61, 80.61, 80.61, 80.61, 80.61, 80.61, 80.1, 80.1, 80.1,
80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.61, 80.1, 81.12, 81.12, 81.12, 81.63, 81.63, 81.63, 81.63,
81.63, 81.63, 82.14, 81.63, 81.63, 81.63, 81.63, 81.63, 81.63, 81.63, 81.63, 81.63, 81.63, 81.63, 81.12,
81.12, 81.12, 82.14, 82.14, 82.14, 82.65, 82.65, 82.65, 82.65, 82.65, 82.65, 82.65, 82.65, 82.65, 82.65,
82.65, 82.65, 82.65, 82.65, 82.65, 82.65, 82.65, 83.16, 83.16, 83.16, 83.16, 83.16, 83.16, 82.65, 83.16,
83.16, 83.16, 83.16, 83.16, 83.16, 83.16, 83.16, 83.16, 83.16, 83.67, 83.67, 83.67, 83.67, 83.67, 83.67,
83.67, 83.67, 83.67, 83.67, 83.16, 83.16, 83.16, 83.67, 83.67, 83.67, 83.67, 83.67, 83.67, 83.67, 83.16,
83.16, 83.67, 83.67, 83.67, 84.18, 84.18, 84.18, 84.18, 84.18, 84.69, 84.18, 84.18, 83.67, 84.69, 84.69,
85.2, 85.71, 85.71, 85.71, 85.71, 85.71, 85.71, 85.71, 85.71]
mqtt_max_err_2 = [100, 100, 100, 100, 100, 100, 100, 9.76, 1.89, 28.08, 6.22, 3.56, 3.26, 3.51, 3.12, 2.79, 2.93, 2.61,
2.06, 2.14, 2.17, 2.06, 2.49, 2.2, 2.46, 2.37, 2.61, 2.67, 2.61, 2.67, 2.79, 2.84, 2.8, 2.92, 3.02,
3.01, 2.97, 3.26, 3.21, 3.04, 2.57, 3.15, 2.49, 2.66, 3.59, 3.66, 3.15, 3.55, 3.46, 3.52, 3.72, 3.42,
3.61, 3.14, 3.63, 3.51, 3.32, 2.92, 3.46, 3.61, 3.45, 3.54, 4.12, 4.81, 4.81, 4.53, 4.76, 4.41, 4.79,
5.14, 5.08, 5.31, 4.86, 4.66, 4.08, 4.29, 4.81, 4.43, 4.77, 4.58, 4.58, 4.77, 4.52, 4.36, 4.2, 4.45,
4.29, 4.23, 4.38, 4.57, 4.38, 4.34, 4.29, 4.05, 4.03, 3.83, 3.65, 3.43, 3.33, 3.78, 3.53, 3.55, 3.4,
3.34, 3.06, 3.43, 3.61, 3.61, 3.89, 3.58, 3.4, 3.23, 2.84, 2.57, 2.79, 2.8, 2.9, 2.68, 2.98, 2.76,
2.65, 2.4, 2.13, 2.38, 2.5, 2.53, 2.37, 2.47, 2.46, 2.23, 2.23, 2.35, 2.27, 2.1, 2.1, 1.86, 1.83,
1.93, 2.07, 1.76, 1.9, 1.63, 1.65, 1.52, 1.53, 1.93, 1.82, 1.63, 1.77, 1.57, 1.56, 1.57, 1.56, 1.76,
1.76, 2.02, 1.93, 1.92, 1.77, 1.96, 1.78, 1.72, 1.63, 1.4, 1.63, 1.49, 1.55, 1.64, 1.74, 1.5, 1.8,
1.66, 1.87, 2.05, 2.03, 2.35, 2.29, 2.34, 2.16, 2.05, 1.93, 1.93, 1.83, 1.84, 1.94, 2.17, 2.32, 2.7,
2.56, 2.45, 2.61, 2.63, 2.69, 2.73, 2.56, 2.56, 2.68, 2.53, 2.55, 2.66, 2.7, 2.56, 2.6, 2.68, 2.52,
2.44, 2.34, 2.23, 2.38, 2.34, 2.8, 2.84, 2.7, 2.65, 2.67, 2.73, 2.73, 2.82, 2.79, 2.77, 2.84, 2.78,
2.82, 2.9, 2.83, 2.82, 2.9, 2.81, 2.7, 2.7, 2.64, 2.57, 2.5, 2.58, 2.5, 2.61, 2.59, 2.71, 2.72, 2.67,
2.64, 2.76, 2.77, 2.75, 2.72, 2.74, 2.68, 2.6, 2.53, 2.49]
mqtt_avr_err_2 = [100, 100, 100, 100, 100, 100, 100, 6.438, 0.984, 9.953999999999999, 3.312, 2.216, 1.238, 1.426, 1.19,
1.204, 1.62, 1.6139999999999999, 1.214, 1.252, 1.03, 0.9480000000000001, 1.4320000000000002, 1.368,
1.228, 1.086, 0.95, 1.1159999999999999, 1.028, 1.31, 1.236, 1.5899999999999999, 1.1139999999999999,
1.25, 1.354, 1.462, 1.9100000000000001, 2.152, 2.002, 1.8900000000000001, 1.97, 2.2439999999999998,
1.976, 2.048, 2.452, 2.536, 2.332, 2.454, 2.452, 2.5060000000000002, 2.552, 2.366, 2.294, 2.24, 2.436,
2.46, 2.316, 2.064, 2.32, 2.422, 2.328, 2.314, 2.548, 2.846, 2.856, 2.7439999999999998, 2.854, 2.634,
2.736, 2.868, 2.8779999999999997, 2.952, 2.726, 2.64, 2.38, 2.474, 2.686, 2.518, 2.638,
2.5100000000000002, 2.518, 2.606, 2.4699999999999998, 2.37, 2.284, 2.426, 2.346, 2.3280000000000003,
2.368, 2.45, 2.368, 2.36, 2.328, 2.252, 2.2520000000000002, 2.116, 2.082, 1.976, 1.97, 2.178, 2.038,
2.054, 2.004, 1.966, 1.832, 2.002, 2.09, 2.096, 2.214, 2.072, 1.974, 1.882, 1.698, 1.542,
1.6400000000000001, 1.652, 1.722, 1.6059999999999999, 1.746, 1.6159999999999999, 1.5899999999999999,
1.466, 1.356, 1.446, 1.304, 1.3379999999999999, 1.262, 1.302, 1.322, 1.22, 1.224, 1.294, 1.416, 1.324,
1.3439999999999999, 1.248, 1.22, 1.256, 1.3119999999999998, 1.182, 1.26, 1.1239999999999999, 1.128,
1.084, 1.124, 1.316, 1.26, 1.17, 1.226, 1.102, 1.1, 1.11, 1.106, 1.19, 1.19, 1.31, 1.274, 1.272,
1.214, 1.278, 1.198, 1.182, 1.128, 1.036, 1.154, 1.076, 1.104, 1.14, 1.18, 1.068, 1.188,
1.1159999999999999, 1.216, 1.3179999999999998, 1.296, 1.438, 1.412, 1.42, 1.34, 1.286, 1.232, 1.228,
0.996, 1.002, 1.054, 1.164, 1.24, 1.402, 1.342, 1.294, 1.3599999999999999, 1.3679999999999999, 1.39,
1.416, 1.336, 1.332, 1.3840000000000001, 1.314, 1.3439999999999999, 1.398, 1.42, 1.368,
1.3940000000000001, 1.426, 1.352, 1.316, 1.262, 1.22, 1.28, 1.264, 1.474, 1.5, 1.4460000000000002,
1.426, 1.426, 1.452, 1.452, 1.5, 1.482, 1.482, 1.51, 1.478, 1.492, 1.526, 1.494, 1.49, 1.518, 1.47,
1.422, 1.424, 1.4020000000000001, 1.3659999999999999, 1.328, 1.364, 1.324, 1.3760000000000001, 1.368,
1.416, 1.42, 1.396, 1.3840000000000001, 1.436, 1.44, 1.43, 1.4100000000000001, 1.4180000000000001,
1.4000000000000001, 1.36, 1.3359999999999999, 1.316]
mqtt_unamb_2 = [83.33, 79.41, 73.91, 74.14, 75.0, 78.49, 84.48, 89.13, 91.36, 95.65, 96.2, 98.37, 98.37, 99.46, 99.46,
99.46, 100.0, 100.0, 99.46, 78.06, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57,
78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 79.08, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57,
78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 78.57, 79.08, 79.08,
79.59, 79.08, 79.59, 79.59, 79.59, 79.59, 79.59, 79.59, 79.59, 79.59, 79.59, 79.59, 79.59, 79.59, 79.59,
79.59, 79.59, 79.59, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 79.59, 80.1, 80.1, 80.1, 80.1,
80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1,
80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.61, 80.61, 80.61, 80.61, 80.61, 81.12,
81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 80.61, 80.61, 80.61, 80.61, 80.1, 80.1, 80.1, 80.1,
80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.1, 80.61, 81.12, 81.12, 81.12,
81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12, 81.12,
81.12, 81.12, 81.12, 81.12, 82.14, 82.14, 82.14, 82.14, 81.63, 81.63, 81.63, 82.14, 82.14, 82.14, 82.14,
82.14, 82.65, 82.14, 82.14, 82.14, 82.14, 82.14, 82.14, 82.14, 82.14, 82.65, 82.65, 82.65, 82.14, 82.14,
82.14, 82.14, 82.14, 81.63, 81.63, 81.63, 82.14, 82.14, 82.65, 82.65, 83.16, 83.16, 83.16, 83.16, 83.16,
83.16, 83.16, 83.16, 83.16, 82.65, 82.65, 82.65, 82.65, 83.16, 83.16, 83.16, 82.65, 82.65, 82.65, 82.65,
82.65, 82.65, 82.65, 82.65, 83.16, 83.16, 83.16, 83.16, 83.16, 83.16, 83.16, 83.16, 83.16, 83.16, 82.65,
82.65, 82.65, 82.65, 82.65, 82.65]
second_grid_max_err_1 = [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 97.43, 93.46, 85.46, 42.15,
24.14, 22.72, 18.37, 17.29, 19.52, 19.32, 16.22, 16.41, 16.87, 16.86, 17.05, 17.1, 17.31, 17.2,
0.74, 0.71, 0.77, 0.8, 0.63, 0.65, 0.71, 0.68, 0.75, 0.69, 0.78, 0.6, 0.64, 0.73, 0.72, 4.92,
4.59, 4.53, 4.55, 4.47, 1.28, 1.46, 1.39, 1.57, 2.48, 2.47, 2.08, 2.1, 1.9, 1.14, 1.28, 0.89,
0.94, 5.16, 5.21, 5.2, 5.16, 5.15, 5.48, 4.9, 4.84, 4.91, 5.14, 5.11, 5.15, 5.15, 5.1, 5.37,
5.36, 5.31, 5.27, 5.32, 5.27, 5.21, 5.22, 5.17, 5.19, 5.15, 5.14, 5.11, 5.25, 5.2, 5.4, 5.39,
5.34, 5.36, 5.29, 5.29, 5.28, 5.29, 0.47, 0.46, 0.46, 0.41, 0.41, 0.43, 0.38, 0.39, 0.42, 0.41,
0.42, 0.46, 0.41, 0.4, 0.43, 0.53, 0.48, 0.51, 0.55, 0.52, 0.61, 0.45, 0.47, 0.47, 0.3, 0.26,
0.31, 0.4, 0.33, 0.24, 0.27, 0.2, 0.27, 0.21, 0.28, 0.3, 0.36, 0.42, 0.34, 0.5, 0.65, 0.57,
0.44, 0.62, 0.7, 0.69, 0.61, 0.49, 0.46, 0.55, 0.47, 0.4, 0.43, 0.49, 0.41, 0.41, 0.4, 0.43,
0.35, 0.33, 0.39, 0.46, 0.48, 0.51, 0.57, 0.49, 0.61, 0.58, 0.6, 0.57, 0.48, 0.65, 0.54, 0.52,
0.57, 0.62, 0.57, 0.75, 0.72, 0.65, 0.66, 0.6, 0.55, 0.52, 0.48, 0.49, 0.54, 0.52, 0.51, 0.55,
0.4, 0.37, 0.38, 0.48, 0.65, 0.67, 0.64, 0.67, 0.64, 0.66, 0.78, 0.69, 0.64, 0.63, 0.55, 0.53,
0.61, 0.59, 0.56, 0.56, 0.57, 0.58, 0.64, 0.57, 0.6, 0.7, 0.64, 0.5, 0.48, 0.41, 0.41, 0.47,
0.45, 0.45, 0.42, 0.33, 0.36, 0.37, 0.4, 0.39, 0.35, 0.34, 0.36, 0.31, 0.34, 0.35, 0.32, 0.41,
0.46, 0.4, 0.41, 0.43, 0.39, 0.34, 0.41, 0.48, 0.54, 0.43, 0.37]
second_grid_avr_err_1 = [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 66.4425, 58.0325,
40.832499999999996, 20.5425, 10.545, 10.945, 10.030000000000001, 7.035, 10.91, 10.6475, 5.755,
5.78, 6.0075, 5.4275, 5.455, 5.4350000000000005, 5.52, 5.6325, 0.5825, 0.5625, 0.495, 0.5225,
0.505, 0.5, 0.5125, 0.47750000000000004, 0.51, 0.51, 0.4625, 0.355, 0.34500000000000003,
0.4025, 0.4025, 1.765, 1.8175, 1.74, 1.6075, 1.6375, 0.84, 0.8825, 0.8674999999999999, 0.895,
1.19, 1.165, 1.0050000000000001, 1.02, 0.9375, 0.6375, 0.7050000000000001, 0.5125, 0.53, 1.8,
1.7575, 1.6975, 1.625, 1.7125000000000001, 1.7950000000000002, 1.705, 1.665, 1.53, 1.5725,
1.5550000000000002, 1.51, 1.52, 1.4849999999999999, 1.47, 1.435, 1.4224999999999999, 1.4175,
1.4075, 1.3975, 1.41, 1.3975, 1.365, 1.4025, 1.3900000000000001, 1.3824999999999998, 1.35,
1.46, 1.4200000000000002, 1.4975, 1.4874999999999998, 1.4575, 1.4575, 1.4425000000000001,
1.4575, 1.4375, 1.485, 0.3075, 0.3325, 0.33, 0.2475, 0.265, 0.31, 0.2325, 0.2625, 0.27, 0.24,
0.2625, 0.315, 0.22, 0.2225, 0.3075, 0.355, 0.3175, 0.325, 0.3325, 0.315, 0.335, 0.2625,
0.2675, 0.2675, 0.19, 0.1675, 0.185, 0.21000000000000002, 0.195, 0.16999999999999998, 0.175,
0.1575, 0.17, 0.165, 0.17, 0.18, 0.19, 0.1925, 0.17250000000000001, 0.24, 0.335, 0.3125,
0.23500000000000001, 0.3075, 0.3325, 0.33999999999999997, 0.28, 0.24, 0.20500000000000002,
0.24000000000000002, 0.2025, 0.1875, 0.185, 0.1875, 0.16749999999999998, 0.1825, 0.1875,
0.1925, 0.1825, 0.185, 0.20750000000000002, 0.2275, 0.235, 0.255, 0.27999999999999997, 0.2575,
0.3275, 0.295, 0.3075, 0.3025, 0.26, 0.3525, 0.29000000000000004, 0.2775, 0.2975, 0.315,
0.26999999999999996, 0.35250000000000004, 0.33749999999999997, 0.28750000000000003,
0.28500000000000003, 0.265, 0.24500000000000002, 0.2275, 0.21, 0.2175, 0.2275, 0.2325,
0.23500000000000001, 0.24250000000000002, 0.21500000000000002, 0.2075, 0.1975, 0.21, 0.2975,
0.315, 0.3075, 0.315, 0.3075, 0.3275, 0.3775, 0.33249999999999996, 0.3275, 0.315,
0.28500000000000003, 0.275, 0.3125, 0.2975, 0.2725, 0.2725, 0.285, 0.27249999999999996, 0.325,
0.2925, 0.3075, 0.3475, 0.32, 0.255, 0.245, 0.215, 0.2225, 0.22999999999999998, 0.2225, 0.2225,
0.21, 0.1925, 0.185, 0.195, 0.22, 0.2075, 0.1825, 0.1875, 0.1925, 0.175, 0.185, 0.1825, 0.175,
0.195, 0.20750000000000002, 0.185, 0.1975, 0.21, 0.195, 0.17750000000000002,
0.19499999999999998, 0.2225, 0.24000000000000002, 0.20249999999999999, 0.195]
second_grid_unamb_1 = [60.0, 77.78, 71.05, 76.36, 73.61, 79.07, 70.43, 66.67, 74.12, 66.85, 71.2, 71.01, 71.75, 70.28,
67.55, 70.8, 73.08, 72.88, 74.51, 76.22, 76.43, 78.83, 79.8, 79.94, 80.78, 81.43, 82.74, 82.74,
83.06, 84.04, 84.08, 85.02, 85.03, 85.34, 85.99, 85.67, 85.67, 86.32, 84.08, 84.39, 88.85, 87.95,
85.67, 85.67, 85.67, 86.62, 86.94, 86.94, 87.26, 87.26, 87.58, 87.9, 87.9, 88.22, 88.54, 88.54,
88.54, 88.85, 88.85, 88.85, 88.85, 88.85, 88.85, 89.17, 89.49, 89.49, 89.49, 89.49, 89.81, 90.13,
90.45, 90.45, 90.45, 90.45, 90.45, 90.45, 90.45, 90.45, 90.45, 90.45, 90.45, 90.13, 88.51, 90.45,
90.45, 90.45, 90.45, 90.45, 90.45, 90.45, 90.45, 90.45, 90.45, 90.76, 90.76, 90.45, 91.28, 91.08,
91.08, 91.08, 91.08, 91.08, 90.76, 91.08, 91.08, 91.08, 91.08, 91.4, 91.4, 91.4, 91.4, 91.4,
91.4, 91.4, 91.4, 91.4, 91.08, 91.08, 91.08, 91.08, 91.4, 91.4, 91.72, 91.72, 91.72, 91.72,
91.72, 91.72, 92.04, 92.04, 92.36, 92.04, 92.04, 92.04, 92.36, 92.04, 92.04, 92.04, 92.04, 92.68,
92.68, 92.36, 92.68, 92.68, 92.68, 92.68, 92.68, 92.68, 92.68, 92.68, 92.68, 92.68, 92.36, 90.74,
92.68, 92.99, 92.99, 92.99, 92.99, 92.99, 92.68, 92.99, 93.31, 93.31, 93.31, 93.31, 92.99, 92.99,
92.99, 92.99, 92.68, 92.99, 92.68, 92.68, 91.36, 92.68, 92.68, 92.68, 92.68, 92.68, 92.68, 92.68,
92.36, 92.36, 92.68, 92.99, 92.68, 92.99, 92.99, 92.99, 92.99, 92.99, 92.99, 93.31, 92.99, 93.31,
93.31, 93.31, 93.31, 92.99, 93.31, 93.31, 93.31, 93.63, 93.63, 93.63, 93.31, 93.63, 93.95, 93.95,
93.95, 94.27, 94.27, 94.27, 93.95, 94.27, 94.27, 94.59, 94.59, 94.59, 94.59, 94.59, 94.59, 94.59,
94.27, 94.27, 94.27, 94.27, 94.27, 94.59, 94.59, 94.59, 94.59, 94.59, 94.9, 94.59, 94.59, 94.59,
94.59, 94.59, 94.59, 94.59, 94.59, 94.59, 94.9, 94.9, 94.9, 94.9, 94.9, 94.9]
second_grid_max_err_2 = [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 93.98, 97.43, 97.43,
95.88, 96.56, 92.01, 92.67, 91.36, 80.52, 76.18, 48.4, 46.6, 51.42, 6.01, 5.78, 5.81, 5.72,
5.5, 2.22, 2.32, 3.3, 1.98, 1.94, 1.51, 1.53, 1.55, 1.29, 1.12, 1.2, 1.89, 2.48, 2.34, 2.43,
4.38, 4.48, 1.67, 1.65, 1.57, 1.6, 1.48, 1.62, 1.7, 1.46, 1.57, 1.52, 1.33, 1.5, 1.57, 1.31,
1.29, 1.41, 1.35, 1.37, 1.23, 1.21, 1.26, 1.16, 1.12, 1.11, 1.35, 1.37, 1.34, 1.59, 1.74, 1.55,
1.07, 1.05, 1.09, 0.98, 1.25, 0.94, 1.06, 0.96, 0.92, 1.09, 1.14, 1.24, 1.2, 1.32, 1.31, 1.29,
1.18, 1.1, 1.05, 1.18, 1.15, 0.99, 1.17, 1.23, 1.26, 1.27, 1.21, 1.31, 1.24, 1.16, 1.15, 1.17,
1.2, 1.21, 1.16, 1.14, 1.13, 1.12, 1.09, 1.02, 1.01, 0.88, 0.92, 0.9, 0.91, 0.97, 0.98, 0.97,
0.97, 0.99, 1.0, 1.04, 1.0, 1.02, 1.02, 0.91, 0.89, 0.92, 0.96, 1.01, 0.95, 0.98, 0.97, 1.04,
1.08, 1.08, 1.09, 1.12, 1.21, 1.1, 1.09, 1.11, 1.14, 1.14, 1.19, 1.23, 1.19, 1.19, 1.24, 1.27,
1.28, 1.26, 1.26, 1.2, 1.2, 1.17, 1.25, 1.29, 1.25, 1.21, 1.19, 1.14, 1.18, 1.93, 2.21, 2.13,
2.18, 1.97, 1.97, 1.93, 1.9, 1.95, 1.91, 1.91, 1.88, 1.77, 1.77, 1.75, 1.77, 1.78, 1.8, 1.69,
1.62, 1.73, 1.8, 1.81, 1.7, 1.71, 1.64, 1.63, 1.61, 1.5, 1.52, 1.37, 1.31, 1.16, 1.24, 1.2,
1.23, 1.23, 1.13, 1.18, 1.13, 1.08, 1.11, 1.12, 1.14, 1.11, 1.05, 1.06, 1.06, 1.05, 1.0, 1.0,
0.96, 0.79, 0.81, 0.76, 0.81, 0.93, 0.89, 0.83, 0.9, 0.77, 0.78, 0.81, 0.78, 0.84, 0.79, 0.79,
0.82, 0.86, 0.83, 0.79, 0.85, 0.96, 1.06, 1.02, 1.03, 1.07]
second_grid_avr_err_2 = [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 62.2975, 60.6325,
62.205000000000005, 59.2925, 60.722500000000004, 45.675, 45.425, 37.335, 27.685, 22.1, 15.67,
15.345, 15.9075, 3.045, 2.7525, 2.7849999999999997, 2.1999999999999997, 2.06,
1.1400000000000001, 1.355, 1.4024999999999999, 1.0825, 1.0025, 0.765, 0.7825, 0.765, 0.74,
0.835, 0.9025, 0.9975, 1.0875, 1.0725, 1.115, 1.9275, 1.9400000000000002, 1.0425, 1.03, 1.0025,
1.0125, 0.9924999999999999, 0.9175, 0.9149999999999999, 0.8925, 0.87, 0.875, 0.8925, 0.855,
0.8625, 0.875, 0.9, 0.9099999999999999, 0.9375, 0.93, 0.9175, 0.91, 0.8674999999999999, 0.85,
0.8700000000000001, 0.8350000000000001, 0.855, 0.8525, 0.8475, 0.8675, 0.845, 0.81, 0.8025,
0.8075, 0.7775000000000001, 0.77, 0.7525000000000001, 0.775, 0.775, 0.7175, 0.7175, 0.675, 0.7,
0.7375, 0.7275, 0.78, 0.7775, 0.735, 0.7425, 0.72, 0.6525, 0.65, 0.6749999999999999, 0.665,
0.705, 0.6849999999999999, 0.645, 0.625, 0.6175, 0.6325, 0.6275000000000001, 0.655, 0.6725,
0.64, 0.625, 0.6125, 0.595, 0.6, 0.6074999999999999, 0.605, 0.6375000000000001, 0.6, 0.5925,
0.6, 0.6224999999999999, 0.6074999999999999, 0.6275, 0.6475, 0.6575, 0.66, 0.67, 0.645, 0.635,
0.6375, 0.64, 0.64, 0.6425, 0.6575, 0.6799999999999999, 0.6575, 0.6699999999999999, 0.6825,
0.7050000000000001, 0.7025, 0.6925, 0.715, 0.73, 0.725, 0.7325, 0.7275, 0.7374999999999999,
0.7175, 0.7275, 0.7175, 0.7374999999999999, 0.7374999999999999, 0.74, 0.7525, 0.7825, 0.7725,
0.7875, 0.7825, 0.77, 0.775, 0.77, 0.7725, 0.78, 0.77, 0.7675, 0.7925, 0.8, 0.785, 0.78,
0.7775, 0.77, 1.2425, 1.3775, 1.3575, 1.3575, 1.265, 1.2874999999999999, 1.2725, 1.2625,
1.3125, 1.29, 1.2974999999999999, 1.2775, 1.19, 1.1875, 1.1775, 1.205, 1.215, 1.215, 1.1475,
1.0775000000000001, 1.1475, 1.19, 1.205, 1.1125, 1.125, 1.0625, 1.0374999999999999, 1.03,
0.9425, 0.9700000000000001, 0.855, 0.8075, 0.71, 0.78, 0.75, 0.7725, 0.7675, 0.72,
0.7324999999999999, 0.695, 0.6725, 0.7000000000000001, 0.7025, 0.7175, 0.6900000000000001,
0.6625, 0.6625, 0.6575, 0.6475, 0.6025, 0.6, 0.58, 0.46, 0.485, 0.44, 0.48, 0.5875, 0.5575,
0.5, 0.55, 0.455, 0.4625, 0.485, 0.46, 0.51, 0.4625, 0.475, 0.4875, 0.5, 0.47, 0.4325, 0.4775,
0.56, 0.625, 0.595, 0.5875, 0.625]
second_grid_unamb_2 = [60.0, 72.22, 78.95, 80.0, 70.0, 74.12, 72.0, 67.74, 68.42, 69.18, 61.85, 66.14, 66.34, 66.2,
65.49, 63.9, 67.77, 65.49, 62.83, 66.9, 66.43, 64.03, 65.29, 68.73, 70.57, 72.31, 72.64, 73.94,
74.27, 74.92, 76.55, 77.52, 78.83, 79.8, 80.13, 80.78, 81.43, 81.43, 82.08, 82.28, 82.35, 83.71,
83.71, 83.39, 80.25, 80.89, 81.21, 79.39, 83.12, 83.44, 83.76, 84.08, 84.08, 84.08, 84.08, 84.08,
84.08, 83.76, 84.08, 84.08, 85.03, 85.03, 85.03, 84.71, 84.71, 85.35, 84.71, 84.08, 84.71, 84.71,
85.03, 85.35, 85.67, 85.99, 86.62, 86.62, 86.62, 86.94, 86.94, 87.26, 87.26, 87.26, 87.26, 87.26,
87.26, 87.26, 87.26, 87.58, 87.9, 87.9, 87.9, 87.9, 87.9, 87.9, 87.9, 88.22, 88.54, 88.22, 88.22,
88.54, 88.54, 89.17, 89.17, 89.17, 89.17, 89.49, 89.17, 89.81, 90.13, 90.76, 90.76, 90.76, 90.76,
90.76, 90.76, 90.76, 90.45, 90.97, 90.76, 90.76, 90.76, 90.76, 90.76, 90.76, 90.45, 90.45, 90.76,
90.13, 90.13, 90.45, 90.45, 90.45, 90.45, 90.13, 90.45, 90.45, 90.45, 90.45, 90.76, 90.76, 90.76,
90.76, 90.76, 90.45, 90.76, 90.45, 90.76, 91.08, 91.08, 91.08, 91.08, 91.08, 91.4, 91.4, 91.4,
91.72, 91.72, 92.04, 92.04, 92.04, 92.04, 92.04, 92.04, 92.04, 92.04, 92.04, 92.04, 92.36, 92.36,
92.99, 92.99, 92.99, 92.99, 92.99, 92.99, 92.99, 92.99, 93.31, 92.99, 93.31, 93.31, 93.63, 93.63,
93.31, 93.31, 93.31, 93.31, 93.31, 93.31, 93.63, 93.95, 93.63, 93.95, 93.95, 93.95, 94.27, 94.27,
94.27, 94.27, 94.27, 93.95, 93.95, 93.95, 93.95, 93.95, 94.27, 94.27, 94.27, 93.95, 93.95, 93.95,
93.95, 93.95, 93.95, 93.95, 94.27, 93.95, 94.59, 94.27, 94.27, 94.27, 94.27, 93.95, 93.95, 93.63,
93.95, 93.95, 93.95, 93.95, 93.95, 93.95, 93.95, 93.95, 94.59, 94.59, 94.27, 94.27, 94.59, 94.59,
94.59, 94.9, 94.59, 94.59, 94.59, 94.59, 94.59, 94.59, 94.59, 94.59, 94.59]
second_grid_max_err_3 = [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 90.41, 90.74, 90.48, 68.35, 37.94, 65.4, 27.09, 16.96, 17.02, 18.25, 18.23, 17.02, 29.13,
29.3, 28.79, 28.39, 26.64, 26.0, 74.46, 74.61, 70.84, 71.09, 71.1, 71.65, 71.52, 71.42, 71.42,
72.93, 72.56, 72.46, 84.49, 84.49, 84.59, 84.6, 84.69, 84.69, 4.43, 4.47, 4.45, 4.43, 4.33,
4.37, 4.44, 4.76, 5.19, 5.14, 5.39, 5.61, 5.62, 5.49, 5.34, 5.33, 5.46, 5.47, 5.67, 5.54, 5.58,
5.51, 5.44, 5.59, 5.42, 4.73, 4.66, 4.72, 4.69, 4.64, 4.79, 4.76, 5.11, 5.35, 5.82, 5.73, 5.55,
5.57, 5.57, 5.66, 5.65, 6.26, 6.26, 6.33, 6.25, 6.2, 6.11, 6.11, 6.32, 6.15, 6.13, 6.12, 6.14,
6.13, 6.18, 6.18, 5.83, 5.94, 5.94, 5.98, 6.0, 6.03, 6.02, 6.21, 6.15, 5.64, 5.61, 6.1, 6.1,
6.06, 6.11, 6.26, 6.14, 6.07, 6.01, 5.92, 5.93, 5.95, 5.94, 5.83, 5.33, 5.21, 5.25, 5.15, 5.07,
5.13, 5.21, 5.22, 5.25, 5.2, 5.25, 5.27, 5.24, 5.5, 5.49, 5.56, 5.04, 5.09, 4.63, 4.63, 4.59,
4.6, 4.79, 4.73, 4.29, 4.33, 4.33, 4.55, 4.51, 4.51, 4.47, 4.91, 4.96, 5.02, 5.25, 5.25, 5.35,
5.34, 5.36, 5.32, 5.37, 5.34, 5.42, 5.41, 5.4, 5.4, 5.76, 4.84, 4.81, 4.94, 4.94, 5.12, 5.06,
5.01, 5.08, 5.12, 5.07, 5.26, 5.5, 5.5, 5.55, 5.36, 5.4, 5.42, 5.41, 5.0, 4.95, 4.96, 5.02,
5.08, 5.04, 5.22, 5.23, 5.24, 5.47, 5.09, 5.02, 5.05, 5.04, 4.87, 5.16, 5.18, 5.2, 5.28, 5.26,
5.26, 5.61, 5.65, 5.62, 5.6, 5.65, 5.66, 5.63, 5.27, 5.23, 5.28, 5.25, 5.23, 5.12, 2.27, 2.28,
5.17, 5.21, 5.17, 2.32, 2.58, 5.44, 2.62, 2.64, 2.71, 2.66, 2.63, 2.64, 2.6]
second_grid_avr_err_3 = [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 58.4325, 58.6325, 58.385000000000005, 28.154999999999998, 22.125, 25.87, 13.1875, 8.6725,
8.215, 8.55, 9.5825, 10.004999999999999, 13.0275, 13.2925, 13.225, 13.1675, 12.93, 12.81,
28.014999999999997, 28.015, 23.8675, 23.810000000000002, 23.81, 23.9025, 23.91, 23.9175,
23.875, 24.26, 24.3075, 24.287499999999998, 25.6775, 25.645, 25.905, 25.9425, 26.115, 26.0975,
1.6175, 1.755, 1.725, 1.7274999999999998, 1.6775, 1.67, 1.6775, 1.8125, 1.965,
1.9324999999999999, 1.9974999999999998, 2.0575, 2.035, 1.9775, 1.9275, 2.0275, 2.18, 2.1375,
2.1975, 2.13, 2.1475, 2.135, 2.0975, 2.2525, 2.185, 1.9075000000000002, 1.8525, 1.91, 1.8825,
1.8575, 2.095, 2.05, 2.1575, 2.2475, 2.5975, 2.5375, 2.4625, 2.4675000000000002, 2.4425,
2.4625, 2.5100000000000002, 2.7175, 2.7275, 2.835, 2.8175, 2.765, 2.725, 2.785, 2.845, 2.755,
2.7425, 2.73, 2.7925, 2.77, 2.7575, 2.7475, 2.605, 2.7275, 2.7175000000000002, 2.73, 2.735,
2.7225, 2.8, 2.8575, 2.8325, 2.6275, 2.5875, 2.7025, 2.6875, 2.655, 2.6425, 2.6875, 2.6375,
2.5925000000000002, 2.53, 2.48, 2.4475, 2.45, 2.4475000000000002, 2.375, 2.1875, 2.105, 2.12,
2.1475, 2.1, 2.1, 2.1075, 2.0925, 2.09, 2.075, 2.0925, 2.0974999999999997, 2.095, 2.2325,
2.2225, 2.21, 2.015, 2.0225, 1.795, 1.7875, 1.765, 1.7574999999999998, 1.8225, 1.8, 1.615,
1.725, 1.71, 1.8225, 1.7874999999999999, 1.7525, 1.7325, 1.8425, 1.8475, 1.9075, 1.975, 1.965,
1.9725, 1.945, 1.945, 1.98, 1.97, 1.9575, 2.0425, 2.0225, 2.025, 2.025, 2.105, 1.7675, 1.75,
1.7775, 1.7750000000000001, 1.8050000000000002, 1.78, 1.7625, 1.7850000000000001, 1.81, 1.8025,
1.8775, 1.965, 1.9925, 1.97, 1.9175, 1.935, 1.9625, 1.9525000000000001, 1.805, 1.7875, 1.825,
1.8074999999999999, 1.86, 1.805, 1.89, 1.8875000000000002, 1.875, 1.9575, 1.8175, 1.795,
1.8599999999999999, 1.855, 1.7850000000000001, 1.87, 1.8624999999999998, 1.8625, 1.8775,
1.8525, 1.8625, 2.0, 2.025, 2.0225, 2.0425, 2.0575, 2.0625, 2.0675, 1.9574999999999998, 1.9475,
1.9725000000000001, 1.96, 1.9725000000000001, 1.9525000000000001, 1.0675000000000001, 1.1,
2.0125, 2.0, 1.9925, 1.055, 1.1300000000000001, 2.035, 1.1525, 1.115, 1.165, 1.095, 1.1025,
1.1125, 1.09]
second_grid_unamb_3 = [60.0, 66.67, 70.27, 80.0, 79.03, 66.67, 65.71, 69.64, 65.0, 67.11, 66.29, 68.62, 67.01, 71.92,
71.5, 67.86, 64.71, 64.9, 63.89, 65.64, 63.91, 68.42, 67.15, 65.87, 68.23, 66.67, 68.95, 70.92,
73.62, 75.24, 75.9, 77.2, 77.2, 77.52, 77.52, 78.18, 78.5, 79.8, 80.13, 81.11, 81.76, 81.76,
82.41, 83.06, 83.71, 84.36, 84.04, 83.39, 83.71, 84.04, 84.69, 85.02, 85.02, 85.02, 84.69, 85.34,
85.34, 85.99, 85.99, 85.99, 85.99, 85.99, 86.32, 86.64, 86.97, 87.3, 87.3, 84.08, 84.39, 84.39,
84.39, 84.08, 84.08, 84.08, 84.08, 84.08, 85.03, 85.03, 85.03, 85.35, 85.35, 85.35, 85.35, 85.99,
85.99, 86.31, 86.62, 86.62, 86.62, 86.62, 86.62, 86.94, 86.94, 86.94, 86.94, 87.58, 87.58, 87.9,
87.9, 87.9, 87.9, 87.9, 88.22, 88.22, 88.54, 88.54, 88.22, 88.54, 88.54, 88.85, 88.85, 88.85,
89.17, 89.17, 89.49, 89.49, 89.49, 89.81, 89.81, 89.81, 89.81, 89.81, 89.81, 89.81, 89.81, 90.13,
89.81, 89.81, 89.81, 89.81, 89.81, 89.81, 90.13, 90.13, 90.13, 90.13, 90.13, 90.13, 90.45, 90.45,
90.45, 90.45, 90.45, 90.45, 90.45, 90.45, 90.45, 90.45, 90.76, 90.76, 90.76, 90.76, 90.76, 90.76,
91.08, 91.08, 91.08, 91.08, 91.08, 91.08, 91.08, 91.08, 91.08, 91.08, 91.08, 91.08, 91.08, 91.08,
91.08, 90.76, 90.45, 88.54, 90.76, 91.08, 91.08, 91.08, 91.08, 91.08, 91.08, 91.08, 91.4, 91.4,
91.4, 91.72, 91.72, 91.72, 91.72, 91.72, 91.72, 91.72, 91.72, 91.72, 91.4, 91.4, 91.4, 91.72,
91.72, 91.72, 91.72, 91.4, 91.4, 91.4, 91.4, 91.4, 91.4, 91.4, 91.4, 91.4, 91.4, 91.4, 91.4,
91.4, 91.72, 91.72, 91.72, 92.04, 91.72, 91.72, 91.72, 92.04, 92.04, 92.04, 92.04, 92.04, 92.04,
92.04, 92.04, 92.04, 92.04, 92.04, 92.04, 92.04, 92.04, 92.04, 92.36, 92.68, 92.36, 92.36, 92.36,
92.99, 92.68, 92.36, 92.99, 92.99, 93.31, 93.31, 93.31, 93.31, 93.31, 92.99]
# NOTE(review): hard-coded experiment trace — presumably the per-learning-round
# maximum error percentage for the second gridworld MDP (run 1); confirm source.
second_grid_max_err_mdp_1 = [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
    100, 100, 100, 100, 100, 100, 100, 97.43, 97.43, 90.84, 62.94, 62.91, 63.98, 66.71, 66.64,
    40.93, 43.14, 43.75, 22.0, 80.76, 83.9, 84.84, 84.59, 83.99, 84.62, 84.17, 85.44, 85.34,
    85.27, 79.08, 79.17, 43.98, 37.44, 6.73, 5.35, 3.82, 2.69, 2.36, 2.24, 50.86, 50.78, 57.36,
    2.49, 2.37, 2.35, 2.22, 1.99, 70.4, 70.51, 70.85, 1.45, 1.34, 1.49, 1.65, 1.47, 1.34, 1.31,
    1.22, 1.26, 1.44, 1.47, 1.48, 1.73, 1.83, 2.03, 1.6, 1.47, 1.71, 1.42, 1.21, 1.08, 1.64,
    1.37, 1.28, 2.32, 2.64, 2.47, 2.45, 2.23, 31.97, 32.27, 32.15, 33.57, 33.67, 84.09, 84.12,
    84.31, 84.35, 84.31, 84.3, 85.35, 85.36, 85.36, 85.36, 85.37, 85.37, 85.37, 85.37, 85.35,
    84.36, 84.38, 84.38, 84.38, 84.31, 84.31, 84.27, 0.93, 29.5, 29.61, 29.61, 28.77, 28.43,
    28.53, 28.35, 28.21, 1.36, 1.43, 1.44, 1.39, 6.22, 6.22, 6.19, 6.13, 6.02, 6.02, 5.93,
    5.89, 5.9, 5.88, 5.89, 5.94, 5.94, 5.91, 6.02, 5.98, 5.96, 5.88, 5.89, 5.82, 5.77, 5.77,
    5.77, 5.76, 5.76, 5.8, 5.76, 5.72, 5.74, 5.73, 5.71, 5.72, 5.7, 5.63, 5.68, 5.62, 5.56,
    5.53, 5.56, 5.55, 5.49, 5.5, 5.39, 5.28, 5.31, 5.28, 5.69, 5.71, 5.67, 5.65, 5.65, 5.54,
    5.44, 5.35, 5.31, 5.32, 5.27, 5.24, 5.2, 5.16, 5.48, 5.47, 5.51, 5.54, 5.49, 5.51, 5.47,
    5.49, 5.42, 5.37, 5.38, 5.35, 5.28, 5.26, 5.25, 5.23, 5.2, 5.24, 5.18, 5.19, 5.18, 5.16,
    5.14, 5.13, 5.08, 5.06, 5.04, 5.01, 5.0, 4.97, 4.96, 4.95, 5.24, 5.23, 5.18, 5.16, 5.09,
    5.06, 5.01, 5.04, 5.03, 5.05, 5.37, 5.35, 5.3, 5.28, 5.31, 5.33, 5.31, 5.27, 5.31, 5.66,
    5.66]
# NOTE(review): hard-coded experiment trace — presumably the per-learning-round
# average error percentage for the second gridworld MDP (run 1); confirm source.
second_grid_avr_err_mdp_1 = [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
    100, 100, 100, 100, 100, 100, 100, 63.3325, 65.7775, 54.9275, 23.9625, 25.217499999999998,
    32.199999999999996, 35.4375, 29.517500000000002, 23.5125, 21.87, 22.445, 12.545, 27.425,
    25.8425, 27.2275, 32.205, 29.0325, 32.825, 32.2825, 31.4275, 30.295, 30.2025, 27.37,
    27.4775, 14.165, 12.7975, 3.3625, 2.9425, 2.3525, 1.8025, 1.5325, 1.6675, 14.0725, 14.17,
    15.655, 2.065, 1.585, 1.57, 1.57, 1.175, 17.990000000000002, 18.1125, 18.07, 1.035, 0.9875,
    1.0325, 1.0975, 1.055, 0.92, 0.8624999999999999, 0.78, 0.89, 1.0175, 1.095, 1.12, 1.3775,
    1.3725, 1.56, 0.725, 0.6025, 0.6325, 0.6, 0.585, 0.5575, 0.6475, 0.5225000000000001,
    0.4975, 0.7525, 0.8575, 0.8350000000000001, 0.8475, 0.78, 8.219999999999999,
    8.315000000000001, 8.3325, 8.6425, 11.11, 24.5, 24.5975, 24.657500000000002, 24.7425,
    24.68, 24.6575, 25.325, 25.4125, 25.482499999999998, 25.5325, 25.565, 25.4475,
    25.477500000000003, 25.545, 25.32, 24.7625, 24.805, 24.862499999999997, 24.8475, 24.68,
    24.69, 24.732499999999998, 0.47000000000000003, 7.5975, 7.625, 7.6225, 7.415, 7.545,
    7.5600000000000005, 7.550000000000001, 7.49, 0.74, 0.73, 0.7175, 0.745, 2.205, 2.2375,
    2.27, 2.275, 2.2475, 2.2175, 2.255, 2.2375, 2.2375000000000003, 2.225, 2.205,
    2.1975000000000002, 2.1825, 2.255, 2.3625, 2.3475, 2.3475, 2.2875, 2.3175, 2.39, 2.36,
    2.3325, 2.295, 2.3375, 2.35, 2.3575, 2.36, 2.3125, 2.3425000000000002, 2.42, 2.3375, 2.33,
    2.27, 2.23, 2.1475, 2.1575, 2.1475, 2.17, 2.1174999999999997, 2.105, 2.1, 2.09, 2.1075,
    2.1350000000000002, 2.1125, 2.1325000000000003, 2.215, 2.1875, 2.26, 2.2725, 2.2125, 2.2,
    2.21, 2.1625, 2.155, 2.12, 2.08, 2.0925000000000002, 2.0775, 2.095, 2.15, 2.185,
    2.1774999999999998, 2.205, 2.21, 2.2125, 2.2675, 2.2, 2.18, 2.15, 2.13, 2.15, 2.1075,
    2.0875, 2.12, 2.14, 2.1175, 2.1075, 2.1525, 2.185, 2.1799999999999997, 2.1675, 2.19, 2.215,
    2.2475, 2.2475, 2.2225, 2.21, 2.1925, 2.1925, 2.185, 2.1975000000000002, 2.245,
    2.2975000000000003, 2.2625, 2.2825, 2.2975, 2.3375, 2.3024999999999998, 2.2575, 2.2775,
    2.3, 2.365, 2.38, 2.3674999999999997, 2.3875, 2.3449999999999998, 2.305, 2.3125,
    2.3049999999999997, 2.2475, 2.2875, 2.3225000000000002]
# NOTE(review): hard-coded experiment trace — presumably the per-learning-round
# unambiguous-row percentage for the second gridworld MDP (run 1); confirm source.
second_grid_unamb_mdp_1 = [80.0, 74.29, 77.59, 82.28, 72.28, 73.83, 69.84, 72.54, 70.2, 75.62, 77.5, 79.04, 75.28,
    58.46, 62.56, 65.2, 71.81, 73.68, 75.95, 74.39, 73.18, 74.55, 68.44, 73.31, 74.78, 75.7,
    70.35, 68.13, 71.83, 72.03, 73.4, 73.22, 76.6, 75.23, 75.69, 76.59, 77.1, 76.51, 77.85,
    76.79, 71.01, 76.29, 79.57, 80.86, 80.0, 79.92, 81.42, 77.44, 78.46, 79.47, 80.28, 80.89,
    81.5, 82.11, 82.32, 82.52, 83.13, 83.54, 83.94, 84.96, 85.16, 85.57, 86.38, 86.99, 86.59,
    86.99, 86.99, 86.14, 86.55, 86.75, 87.15, 87.15, 88.15, 88.35, 88.35, 88.55, 88.76, 88.76,
    89.16, 89.36, 89.38, 89.78, 90.18, 90.38, 90.58, 90.58, 90.58, 90.58, 90.98, 91.38, 91.58,
    91.58, 89.88, 90.08, 90.28, 91.07, 91.07, 91.07, 91.27, 91.67, 91.67, 87.52, 87.52, 88.11,
    88.11, 88.11, 88.11, 88.3, 88.5, 88.5, 88.69, 88.69, 89.11, 89.3, 88.91, 88.91, 89.3, 89.3,
    89.3, 89.3, 89.3, 89.3, 89.3, 89.3, 89.3, 89.3, 89.69, 89.69, 90.08, 90.08, 90.08, 90.08,
    90.08, 90.08, 90.08, 90.08, 90.08, 90.08, 90.08, 90.27, 90.27, 90.66, 86.83, 87.02, 87.21,
    87.4, 87.6, 87.98, 88.17, 88.17, 87.98, 88.17, 88.17, 88.17, 88.36, 88.55, 88.55, 88.55,
    88.55, 88.55, 88.55, 88.74, 88.74, 88.55, 88.74, 88.74, 89.12, 89.12, 89.12, 89.5, 89.5,
    89.5, 89.31, 89.31, 89.31, 89.31, 89.31, 89.31, 89.31, 89.31, 89.31, 89.5, 89.5, 89.69,
    89.89, 89.69, 89.69, 89.69, 89.69, 89.69, 89.69, 89.69, 89.69, 89.89, 89.89, 89.89, 89.89,
    89.89, 89.89, 89.89, 89.89, 89.89, 92.37, 92.61, 92.61, 92.61, 92.61, 92.61, 92.61, 92.61,
    92.61, 92.61, 92.8, 92.8, 92.8, 92.8, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0,
    92.8, 92.94, 93.19, 93.19, 93.19, 93.19, 93.39, 93.39, 93.39, 93.39, 93.39, 93.39, 93.39,
    93.39, 93.39, 93.58, 93.39, 93.97, 93.97, 93.97, 93.97, 93.77, 93.77, 93.77, 93.77]
# NOTE(review): hard-coded experiment trace — presumably the per-learning-round
# maximum error percentage for the second gridworld MDP (run 2); confirm source.
second_grid_max_err_mdp_2 = [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
    100, 100, 100, 97.43, 97.43, 81.02, 82.12, 90.06, 87.85, 77.77, 55.47, 80.41, 71.61, 70.24,
    63.23, 63.43, 65.83, 64.43, 64.26, 61.59, 62.82, 62.9, 60.64, 59.49, 59.59, 59.61, 59.61,
    58.67, 57.58, 58.2, 5.67, 5.21, 5.06, 4.63, 4.91, 5.2, 4.66, 4.27, 4.42, 4.08, 4.32, 4.66,
    4.68, 4.53, 4.12, 4.6, 4.37, 4.31, 4.1, 3.87, 4.35, 4.54, 4.59, 4.75, 4.72, 3.31, 3.11,
    2.85, 2.76, 2.41, 2.24, 3.6, 3.51, 3.49, 3.6, 3.57, 3.57, 3.69, 3.74, 3.64, 3.58, 3.56,
    3.46, 3.4, 3.34, 3.3, 3.36, 3.19, 3.23, 3.21, 3.29, 3.21, 3.4, 3.42, 3.39, 3.33, 3.29,
    3.24, 3.88, 3.92, 4.06, 4.05, 4.08, 4.05, 4.04, 3.93, 4.08, 4.17, 4.29, 4.28, 4.3, 4.38,
    4.54, 4.59, 4.55, 4.66, 4.62, 4.78, 4.71, 4.73, 4.72, 4.8, 4.74, 4.81, 4.85, 4.96, 4.97,
    4.93, 4.89, 4.9, 4.9, 4.88, 4.77, 4.76, 4.83, 4.81, 4.77, 4.73, 4.66, 4.61, 4.58, 4.52,
    4.44, 4.35, 4.35, 4.38, 4.42, 4.3, 4.38, 4.32, 4.35, 4.37, 4.39, 4.44, 4.47, 4.32, 4.3,
    4.31, 4.31, 4.28, 5.13, 5.19, 5.23, 5.24, 5.23, 5.23, 5.23, 5.16, 5.16, 5.21, 5.32, 5.29,
    5.32, 5.47, 5.42, 5.4, 5.54, 5.59, 5.52, 1.74, 0.6, 0.59, 0.58, 0.59, 60.12, 60.62, 1.07,
    1.27, 0.58, 0.56, 0.76, 0.88, 0.76, 0.71, 0.94, 0.79, 0.67, 0.91, 0.98, 0.85, 0.76, 0.74,
    0.73, 0.67, 0.64, 0.7, 0.81, 0.9, 0.87, 0.97, 0.89, 1.0, 0.93, 0.96, 1.04, 1.15, 1.1, 1.09,
    1.11, 1.09, 1.07, 1.14, 1.13, 1.03, 0.96, 0.87, 0.85, 1.07, 1.01, 0.95, 0.9, 0.83, 1.02,
    0.87, 0.75, 0.7, 0.67, 1.53, 0.9, 0.83, 0.78, 0.97]
# NOTE(review): hard-coded experiment trace — presumably the per-learning-round
# average error percentage for the second gridworld MDP (run 2); confirm source.
second_grid_avr_err_mdp_2 = [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
    100, 100, 100, 61.695, 66.6725, 54.12, 39.347500000000004, 42.305, 37.8925, 26.1875,
    26.7275, 34.762499999999996, 34.05, 38.72, 36.715, 28.905, 23.6325, 22.150000000000002,
    22.365000000000002, 20.5425, 21.535, 21.265, 16.767500000000002, 16.5425, 16.575, 16.445,
    16.5625, 16.3325, 15.9725, 16.075, 2.985, 2.8025, 2.1599999999999997, 1.89,
    1.7650000000000001, 1.8125, 1.705, 1.3224999999999998, 1.3675, 1.185, 1.23, 1.3, 1.2925,
    1.615, 1.5875000000000001, 1.67, 1.55, 1.595, 1.4675, 1.6125, 2.215, 2.3375, 2.5225, 2.54,
    2.455, 1.7725, 1.825, 1.5775, 1.3775, 1.22, 1.1400000000000001, 1.5050000000000001, 1.42,
    1.4125, 1.505, 1.5, 1.52, 1.5625, 1.58, 1.4975, 1.3825, 1.36, 1.195, 1.0375, 1.085, 1.02,
    1.09, 1.155, 1.12, 1.2675, 1.2825, 1.28, 1.0725, 1.205, 1.2925, 1.28, 1.2675, 1.2925, 1.4,
    1.33, 1.3275, 1.3225, 1.3225, 1.325, 1.3125, 1.335, 1.5050000000000001, 1.47, 1.41, 1.385,
    1.3675, 1.4075, 1.51, 1.5425, 1.4825, 1.5975000000000001, 1.5350000000000001,
    1.6800000000000002, 1.575, 1.57, 1.5725, 1.5699999999999998, 1.53, 1.6024999999999998,
    1.6749999999999998, 1.705, 1.7275, 1.6624999999999999, 1.5775, 1.5750000000000002, 1.53,
    1.5, 1.4474999999999998, 1.4425, 1.4925, 1.505, 1.4725, 1.4300000000000002,
    1.3900000000000001, 1.36, 1.33, 1.2674999999999998, 1.26, 1.2374999999999998, 1.22, 1.23,
    1.24, 1.205, 1.2349999999999999, 1.2175, 1.2225, 1.2275, 1.2525, 1.2825000000000002, 1.29,
    1.25, 1.26, 1.315, 1.3375, 1.3425, 1.54, 1.5425, 1.5525, 1.5225, 1.5050000000000001,
    1.5075, 1.485, 1.46, 1.46, 1.4725, 1.49, 1.4875, 1.5150000000000001, 1.555, 1.53,
    1.5025000000000002, 1.5375, 1.57, 1.5299999999999998, 0.595, 0.305, 0.295, 0.285, 0.285,
    16.2025, 16.419999999999998, 0.435, 0.4775, 0.3575, 0.385, 0.505, 0.5075, 0.4775,
    0.46499999999999997, 0.57, 0.4925, 0.44000000000000006, 0.385, 0.3475, 0.325, 0.28, 0.285,
    0.345, 0.3425, 0.355, 0.365, 0.36, 0.3725, 0.3725, 0.355, 0.3, 0.3325, 0.3025, 0.31, 0.305,
    0.3075, 0.335, 0.3325, 0.36750000000000005, 0.35000000000000003, 0.3775, 0.43, 0.4225,
    0.405, 0.4125, 0.36, 0.4, 0.435, 0.355, 0.32, 0.2925, 0.295, 0.3575, 0.335, 0.275, 0.255,
    0.23500000000000001, 0.595, 0.3275, 0.385, 0.395, 0.445]
# NOTE(review): hard-coded experiment trace — presumably the per-learning-round
# unambiguous-row percentage for the second gridworld MDP (run 2); confirm source.
second_grid_unamb_mdp_2 = [80.0, 71.43, 70.97, 73.75, 73.0, 66.67, 59.44, 64.24, 69.81, 67.43, 64.1, 64.84, 72.52,
    74.37, 72.84, 69.85, 68.44, 72.26, 70.99, 71.39, 72.36, 68.88, 74.67, 75.73, 75.82, 72.3,
    74.24, 77.39, 77.05, 73.17, 77.37, 78.85, 76.05, 77.05, 72.5, 79.61, 81.44, 82.24, 82.63,
    83.23, 83.03, 83.43, 83.63, 83.63, 83.83, 82.84, 84.22, 85.4, 85.8, 86.79, 87.57, 88.36,
    89.55, 89.55, 90.34, 90.34, 90.53, 90.53, 90.73, 90.93, 91.12, 91.12, 91.32, 91.32, 91.32,
    91.72, 91.72, 91.91, 92.31, 92.5, 92.31, 92.31, 92.5, 93.1, 93.1, 93.1, 93.49, 93.29, 92.25,
    93.69, 93.69, 93.89, 93.89, 93.89, 93.89, 93.89, 93.89, 94.08, 89.65, 89.65, 89.45, 91.35,
    93.58, 94.67, 94.67, 94.87, 94.87, 94.87, 94.87, 94.87, 94.87, 94.87, 94.87, 94.87, 94.87,
    95.07, 95.07, 95.07, 95.07, 95.07, 95.07, 95.07, 95.07, 95.07, 95.07, 95.07, 95.07, 95.07,
    95.07, 95.07, 95.07, 95.07, 95.07, 95.07, 95.07, 95.07, 95.07, 94.87, 90.47, 90.47, 90.47,
    90.66, 90.66, 90.86, 90.86, 90.86, 90.86, 91.05, 91.05, 91.05, 91.25, 91.44, 91.25, 88.31,
    88.34, 90.25, 91.63, 91.83, 92.02, 92.02, 92.02, 92.02, 92.02, 92.02, 91.83, 90.63, 92.02,
    92.02, 92.02, 92.02, 92.02, 92.02, 92.02, 92.02, 92.02, 92.02, 92.02, 92.22, 92.41, 92.61,
    92.41, 91.73, 92.22, 91.01, 92.41, 92.8, 92.8, 92.8, 92.8, 92.8, 92.8, 93.0, 92.8, 92.8,
    93.0, 92.8, 92.8, 92.13, 93.19, 93.19, 93.0, 91.94, 89.6, 89.98, 90.17, 90.36, 90.36, 90.55,
    90.36, 90.36, 90.36, 91.68, 92.51, 90.55, 92.44, 93.28, 93.28, 93.47, 93.47, 93.28, 91.3,
    91.3, 92.63, 93.67, 93.67, 93.47, 93.67, 93.67, 93.67, 93.86, 93.67, 93.67, 93.86, 93.67,
    93.86, 93.86, 93.86, 93.86, 94.05, 93.86, 93.86, 93.86, 94.05, 94.05, 94.05, 94.05, 94.05,
    94.24, 94.24, 94.24, 94.24, 94.24, 94.24, 94.24, 94.05, 93.56, 94.43, 94.24, 94.43, 94.05]
def single_plot():
    """Plot the MQTT error/unambiguity curves in a single figure.

    Renders maximum error, average error, and unambiguous-row percentage
    against the learning round using matplotlib's pgf backend, then writes
    the figure as ``stoppingAndUnamb_mqtt.pgf`` and (via tikzplotlib) as
    ``stoppingAndUnamb_mqtt.tex``.
    """
    matplotlib.use("pgf")
    matplotlib.rcParams.update({
        "pgf.texsystem": "pdflatex",
        'font.family': 'serif',
        'text.usetex': True,
        'pgf.rcfonts': False,
    })
    # Bug fix: the x-axis was previously sized from second_grid_max_err_2
    # although the MQTT series are plotted; if the lengths differ,
    # plt.plot raises a ValueError. Size it from the plotted data instead.
    r = list(range(len(mqtt_max_err_1)))
    plt.plot(r, mqtt_max_err_1, label='Maximum Error')
    plt.plot(r, mqtt_avr_err_1, label='Average Error')
    plt.plot(r, mqtt_unamb_1, label='Unambiguous Rows')
    plt.ylabel('Percentage')
    plt.xlabel('Learning Round')
    plt.title('MQTT')
    plt.grid(axis='y')
    plt.legend()
    plt.savefig("stoppingAndUnamb_mqtt.pgf")
    # Local import: tikzplotlib is an optional dependency used only here.
    import tikzplotlib
    tikzplotlib.save("stoppingAndUnamb_mqtt.tex")
def side_by_side():
    """Plot the gridworld and MQTT error/unambiguity curves side by side.

    Left panel: 72-state gridworld experiment; right panel: MQTT experiment.
    Each panel shows maximum error, average error, and unambiguous-row
    percentage against the learning round. The figure is rendered with the
    pgf backend and saved as ``stoppingAndUnamb2plots.pgf`` and (via
    tikzplotlib) as ``stoppingAndUnamb2plots.tex``.
    """
    matplotlib.use("pgf")
    matplotlib.rcParams.update({
        "pgf.texsystem": "pdflatex",
        'font.family': 'serif',
        'text.usetex': True,
        'pgf.rcfonts': False,
    })
    fig, (ax_grid, ax_mqtt) = plt.subplots(1, 2, figsize=(10, 3))
    # Bug fix: the original body called the non-existent Axes method
    # `plot_side_by_side()` on both subplots and would crash with an
    # AttributeError. Restored the evidently intended plot calls (the
    # gridworld series hinted at by the commented-out code / panel title,
    # and the MQTT series used in single_plot()).
    r_grid = list(range(len(second_grid_max_err_2)))
    ax_grid.plot(r_grid, second_grid_max_err_2, label='Maximum Error')
    ax_grid.plot(r_grid, second_grid_avr_err_2, label='Average Error')
    ax_grid.plot(r_grid, second_grid_unamb_2, label='Unambiguous Rows')
    ax_grid.set_ylabel('Percentage')
    ax_grid.set_xlabel('Learning Round')
    ax_grid.set_title('72 State Gridworld')
    ax_grid.grid(axis='y')
    ax_grid.legend()
    # Each panel's x-axis is sized from its own series so the lengths match.
    r_mqtt = list(range(len(mqtt_max_err_1)))
    ax_mqtt.plot(r_mqtt, mqtt_max_err_1, label='Maximum Error')
    ax_mqtt.plot(r_mqtt, mqtt_avr_err_1, label='Average Error')
    ax_mqtt.plot(r_mqtt, mqtt_unamb_1, label='Unambiguous Rows')
    ax_mqtt.set_ylabel('Percentage')
    ax_mqtt.set_xlabel('Learning Round')
    ax_mqtt.set_title('MQTT')
    ax_mqtt.grid(axis='y')
    ax_mqtt.legend()
    fig.tight_layout()
    fig.savefig("stoppingAndUnamb2plots.pgf")
    # Local import: tikzplotlib is an optional dependency used only here.
    import tikzplotlib
    tikzplotlib.save("stoppingAndUnamb2plots.tex")
# Script entry point: generate the two-panel comparison figure.
if __name__ == '__main__':
    side_by_side()
gitextract_arfvw_hu/
├── .gitattributes
├── .github/
│ └── workflows/
│ ├── codeql-analysis.yml
│ └── python-app.yml
├── .gitignore
├── Benchmarking/
│ ├── Benchmark_ErrorStop.py
│ ├── CompleteStochasticBenchmarking.py
│ ├── StochasticAlgComparison.py
│ ├── StochasticBenchmarkingWPrism.py
│ ├── StopWithErorrRate.py
│ ├── all_results.pickle
│ ├── benchmark.py
│ ├── benchmark_alphabet_increase.py
│ ├── benchmark_size_increase.py
│ ├── cex_processing_benchmark.py
│ ├── compare_lstar_and_kv.py
│ ├── error_benchmark_statistics.py
│ ├── evaluate_l_star_configurations.py
│ ├── fm_benchmark.py
│ ├── fm_plots.py
│ ├── generate_plots.py
│ ├── json_lbt.py
│ ├── papni_sequences.pickle
│ ├── papni_vs_rpni_benchmarking.py
│ ├── passive_mdp_vs_smm.py
│ ├── prism_eval_props/
│ │ ├── bluetooth.props
│ │ ├── emqtt_two_client.props
│ │ ├── first_eval.props
│ │ ├── second_eval.props
│ │ ├── shared_coin_eval.props
│ │ ├── slot_machine_eval.props
│ │ └── tcp_eval.props
│ ├── rpni_papni_memory_footrpint.py
│ ├── stochastic_benchmarking/
│ │ ├── Benchmark_ErrorStop.py
│ │ ├── CompleteStochasticBenchmarking.py
│ │ ├── StochasticBenchmarkingWPrism.py
│ │ ├── passive_mdp_vs_smm.py
│ │ ├── plot_error_steps.py
│ │ ├── stochastic_benchmark_random_automata.py
│ │ ├── strategy_comp.py
│ │ └── unamb_error_plot.py
│ ├── unamb_error_plot.py
│ └── vpa_benchmarking/
│ └── benchmark_vpa.py
├── DotModels/
│ ├── Angluin_Mealy.dot
│ ├── Angluin_Moore.dot
│ ├── Bluetooth/
│ │ ├── CC2640R2-no-feature-req.dot
│ │ ├── CC2640R2-no-feature-req_stochastic.dot
│ │ ├── CC2640R2-no-pairing-req.dot
│ │ ├── CC2650.dot
│ │ ├── CYBLE-416045-02.dot
│ │ ├── CYBLE-416045-02_Crash_No_Response_stochastic.dot
│ │ ├── CYW43455.dot
│ │ ├── CYW43455_stochastic.dot
│ │ ├── bluetooth_model.dot
│ │ ├── bluetooth_reduced.dot
│ │ ├── cc2652r1.dot
│ │ ├── convert_to_stochastic.py
│ │ └── nRF52832.dot
│ ├── MDPs/
│ │ ├── bluetooth.dot
│ │ ├── faulty_car_alarm.dot
│ │ ├── first_grid.dot
│ │ ├── mqtt.dot
│ │ ├── second_grid.dot
│ │ ├── shared_coin.dot
│ │ ├── slot_machine.dot
│ │ └── tcp.dot
│ ├── MQTT/
│ │ ├── ActiveMQ__two_client_will_retain.dot
│ │ ├── VerneMQ__two_client_will_retain.dot
│ │ ├── emqtt__two_client_will_retain.dot
│ │ ├── hbmqtt__two_client_will_retain.dot
│ │ └── mosquitto__two_client_will_retain.dot
│ ├── SimpleABC/
│ │ ├── simple_abc_dfa.dot
│ │ ├── simple_abc_mealy.dot
│ │ └── simple_abc_moore.dot
│ ├── TCP/
│ │ ├── TCP_Linux_Client.dot
│ │ ├── tcp_server_bsd_trans.dot
│ │ ├── tcp_server_ubuntu_trans.dot
│ │ └── tcp_server_windows_trans.dot
│ ├── TLS/
│ │ ├── JSSE_1.8.0_25_server_regular.dot
│ │ ├── NSS_3.17.4_server_regular.dot
│ │ ├── OpenSSL_1.0.2_server_regular.dot
│ │ ├── RSA_BSAFE_C_4.0.4_server_regular.dot
│ │ └── miTLS_0.1.3_server_regular.dot
│ ├── arithmetics.dot
│ ├── car_alarm.dot
│ ├── coffee_mealy.dot
│ ├── coffee_moore.dot
│ ├── five_clients_mqtt_abstracted_onfsm.dot
│ ├── mooreModel.dot
│ ├── onfsm_0.dot
│ ├── onfsm_1.dot
│ ├── onfsm_2.dot
│ ├── onfsm_3.dot
│ ├── onfsm_4.dot
│ ├── onfsm_5.dot
│ └── tomitaGrammars/
│ ├── tomita_1.dot
│ ├── tomita_2.dot
│ ├── tomita_3.dot
│ ├── tomita_4.dot
│ ├── tomita_5.dot
│ ├── tomita_6.dot
│ └── tomita_7.dot
├── Examples.py
├── LICENCE.txt
├── README.md
├── aalpy/
│ ├── SULs/
│ │ ├── AutomataSUL.py
│ │ ├── PyMethodSUL.py
│ │ ├── RegexSUL.py
│ │ ├── TomitaSUL.py
│ │ └── __init__.py
│ ├── __init__.py
│ ├── automata/
│ │ ├── Dfa.py
│ │ ├── MarkovChain.py
│ │ ├── Mdp.py
│ │ ├── MealyMachine.py
│ │ ├── MooreMachine.py
│ │ ├── NonDeterministicMooreMachine.py
│ │ ├── Onfsm.py
│ │ ├── Sevpa.py
│ │ ├── StochasticMealyMachine.py
│ │ ├── Vpa.py
│ │ └── __init__.py
│ ├── base/
│ │ ├── Automaton.py
│ │ ├── CacheTree.py
│ │ ├── Oracle.py
│ │ ├── SUL.py
│ │ └── __init__.py
│ ├── learning_algs/
│ │ ├── __init__.py
│ │ ├── adaptive/
│ │ │ ├── AdaptiveLSharp.py
│ │ │ ├── AdaptiveObservationTree.py
│ │ │ ├── StateMatching.py
│ │ │ └── __init__.py
│ │ ├── deterministic/
│ │ │ ├── ADS.py
│ │ │ ├── Apartness.py
│ │ │ ├── ClassificationTree.py
│ │ │ ├── CounterExampleProcessing.py
│ │ │ ├── KV.py
│ │ │ ├── LSharp.py
│ │ │ ├── LStar.py
│ │ │ ├── ObservationTable.py
│ │ │ ├── ObservationTree.py
│ │ │ └── __init__.py
│ │ ├── deterministic_passive/
│ │ │ ├── ClassicRPNI.py
│ │ │ ├── GsmRPNI.py
│ │ │ ├── PAPNI.py
│ │ │ ├── RPNI.py
│ │ │ ├── __init__.py
│ │ │ ├── active_RPNI.py
│ │ │ └── rpni_helper_functions.py
│ │ ├── general_passive/
│ │ │ ├── GeneralizedStateMerging.py
│ │ │ ├── GsmAlgorithms.py
│ │ │ ├── GsmNode.py
│ │ │ ├── Instrumentation.py
│ │ │ ├── ScoreFunctionsGSM.py
│ │ │ └── __init__.py
│ │ ├── non_deterministic/
│ │ │ ├── AbstractedOnfsmLstar.py
│ │ │ ├── AbstractedOnfsmObservationTable.py
│ │ │ ├── NonDeterministicSULWrapper.py
│ │ │ ├── OnfsmLstar.py
│ │ │ ├── OnfsmObservationTable.py
│ │ │ ├── TraceTree.py
│ │ │ └── __init__.py
│ │ ├── stochastic/
│ │ │ ├── DifferenceChecker.py
│ │ │ ├── SamplingBasedObservationTable.py
│ │ │ ├── StochasticCexProcessing.py
│ │ │ ├── StochasticLStar.py
│ │ │ ├── StochasticTeacher.py
│ │ │ └── __init__.py
│ │ └── stochastic_passive/
│ │ ├── ActiveAleriga.py
│ │ ├── Alergia.py
│ │ ├── CompatibilityChecker.py
│ │ ├── FPTA.py
│ │ └── __init__.py
│ ├── oracles/
│ │ ├── BreadthFirstExplorationEqOracle.py
│ │ ├── CacheBasedEqOracle.py
│ │ ├── KWayStateCoverageEqOracle.py
│ │ ├── KWayTransitionCoverageEqOracle.py
│ │ ├── PacOracle.py
│ │ ├── PerfectKnowledgeEqOracle.py
│ │ ├── ProvidedSequencesOracleWrapper.py
│ │ ├── RandomWalkEqOracle.py
│ │ ├── RandomWordEqOracle.py
│ │ ├── StatePrefixEqOracle.py
│ │ ├── TransitionFocusOracle.py
│ │ ├── UserInputEqOracle.py
│ │ ├── WMethodEqOracle.py
│ │ ├── WpMethodEqOracle.py
│ │ └── __init__.py
│ ├── paths.py
│ └── utils/
│ ├── AutomatonGenerators.py
│ ├── BenchmarkSULs.py
│ ├── BenchmarkSevpaModels.py
│ ├── BenchmarkVpaModels.py
│ ├── DataHandler.py
│ ├── FileHandler.py
│ ├── HelperFunctions.py
│ ├── ModelChecking.py
│ ├── Sampling.py
│ └── __init__.py
├── docs/
│ ├── README.md
│ ├── _config.yml
│ ├── google306875680a34d740.html
│ └── instructions.txt
├── jAlergia/
│ ├── alergia.jar
│ └── exampleMdpData.txt
├── notebooks/
│ ├── Abstracted_Non-Det_FSM.ipynb
│ ├── AngluinExample.ipynb
│ ├── MDP_Example.ipynb
│ ├── MDP_and_SMM_Example.ipynb
│ ├── ONFSM_Example.ipynb
│ ├── RandomMealyExample.ipynb
│ ├── RegexExample.ipynb
│ └── Stochstic_Examples.ipynb
├── setup.py
└── tests/
├── oracles/
│ ├── test_baseOracle.py
│ └── test_kWayTransitionCoverageEqOracle.py
├── test_charSet.py
├── test_deterministic.py
├── test_deterministic_passive.py
├── test_file_operations.py
├── test_non_deterministic.py
├── test_rwpmethod_oracle.py
├── test_stochastic.py
├── test_wmethod_oracle.py
├── test_wpmethod_oracle.py
└── tests_imports.py
SYMBOL INDEX (996 symbols across 105 files)
FILE: Benchmarking/fm_plots.py
function plot_error (line 3) | def plot_error():
function plot_benchmarks (line 70) | def plot_benchmarks():
FILE: Benchmarking/generate_plots.py
function plot_increasing_size_exp (line 7) | def plot_increasing_size_exp():
function plot_increasing_alphabeth_exp (line 52) | def plot_increasing_alphabeth_exp():
function plot_together (line 94) | def plot_together():
function plot_together (line 178) | def plot_together():
function plot_together_learnlib_comp (line 264) | def plot_together_learnlib_comp():
function plot_runtime_steps (line 343) | def plot_runtime_steps():
FILE: Benchmarking/json_lbt.py
function is_valid_json (line 12) | def is_valid_json(s):
function to_json_string (line 20) | def to_json_string(json_tuple):
function generate_random_json (line 30) | def generate_random_json(max_depth=3, max_elements=5):
function corrupt_json (line 88) | def corrupt_json(symbols):
function generate_dataset (line 237) | def generate_dataset(num_sequences):
function validate_string_with_json_parser (line 253) | def validate_string_with_json_parser(json_str, json_parser):
FILE: Benchmarking/papni_vs_rpni_benchmarking.py
function calculate_f1_score (line 16) | def calculate_f1_score(precision, recall):
function calculate_precision_recall_f1 (line 22) | def calculate_precision_recall_f1(true_positives, false_positives, false...
function compare_rpni_and_papni (line 30) | def compare_rpni_and_papni(test_data, rpni_model, papni_model):
function get_sequences_from_active_sevpa (line 65) | def get_sequences_from_active_sevpa(model, verbose=False):
function split_data_to_learning_and_testing (line 100) | def split_data_to_learning_and_testing(data, learning_to_test_ratio=0.5):
function run_experiment (line 126) | def run_experiment(experiment_id,
function run_experiments_multiple_times (line 169) | def run_experiments_multiple_times(test_models, num_times, learning_to_t...
function test_papni_based_on_sevpa_dataset (line 195) | def test_papni_based_on_sevpa_dataset():
FILE: Benchmarking/passive_mdp_vs_smm.py
function writeSamplesToFile (line 18) | def writeSamplesToFile(samples, path="alergiaSamples.txt"):
function deleteSampleFile (line 33) | def deleteSampleFile(path="alergiaSamples.txt"):
FILE: Benchmarking/stochastic_benchmarking/passive_mdp_vs_smm.py
function writeSamplesToFile (line 18) | def writeSamplesToFile(samples, path="alergiaSamples.txt"):
function deleteSampleFile (line 33) | def deleteSampleFile(path="alergiaSamples.txt"):
FILE: Benchmarking/stochastic_benchmarking/plot_error_steps.py
function crash_plot (line 18) | def crash_plot():
function no_response (line 42) | def no_response():
function side_by_side (line 61) | def side_by_side():
FILE: Benchmarking/stochastic_benchmarking/stochastic_benchmark_random_automata.py
function learn (line 15) | def learn(mdp, type):
FILE: Benchmarking/stochastic_benchmarking/strategy_comp.py
function learn (line 23) | def learn(strategy):
function plot (line 65) | def plot():
function plot_side_by_side (line 90) | def plot_side_by_side():
FILE: Benchmarking/stochastic_benchmarking/unamb_error_plot.py
function single_plot (line 427) | def single_plot():
function side_by_side (line 463) | def side_by_side():
FILE: Benchmarking/unamb_error_plot.py
function single_plot (line 427) | def single_plot():
function side_by_side (line 463) | def side_by_side():
FILE: Benchmarking/vpa_benchmarking/benchmark_vpa.py
function state_increasing (line 15) | def state_increasing():
function alphabet_increasing (line 71) | def alphabet_increasing():
function alphabet_increasing_variable (line 126) | def alphabet_increasing_variable():
function benchmark_vpa_dfa (line 193) | def benchmark_vpa_dfa():
FILE: DotModels/Bluetooth/convert_to_stochastic.py
class ModelSUL (line 14) | class ModelSUL(SUL):
method __init__ (line 15) | def __init__(self, model):
method pre (line 20) | def pre(self):
method post (line 24) | def post(self):
method step (line 27) | def step(self, letter):
function to_mdp (line 37) | def to_mdp():
function to_smm (line 72) | def to_smm():
FILE: Examples.py
function random_deterministic_model_example (line 1) | def random_deterministic_model_example():
function angluin_seminal_example (line 25) | def angluin_seminal_example():
function angluin_seminal_example_lsharp (line 48) | def angluin_seminal_example_lsharp():
function tomita_example (line 72) | def tomita_example(tomita_number=3):
function regex_example (line 93) | def regex_example(regex, alphabet):
function learn_date_validator (line 115) | def learn_date_validator():
function bluetooth_Lsharp (line 168) | def bluetooth_Lsharp():
function bluetooth_adaptive_Lsharp (line 186) | def bluetooth_adaptive_Lsharp():
function random_deterministic_example_with_provided_sequences (line 210) | def random_deterministic_example_with_provided_sequences():
function big_input_alphabet_example (line 238) | def big_input_alphabet_example():
function random_onfsm_example (line 280) | def random_onfsm_example(num_states, input_size, output_size, n_sampling):
function random_mdp_example (line 306) | def random_mdp_example():
function learn_python_class (line 325) | def learn_python_class():
function mqtt_example (line 352) | def mqtt_example():
function onfsm_mealy_paper_example (line 392) | def onfsm_mealy_paper_example():
function multi_client_mqtt_example (line 415) | def multi_client_mqtt_example():
function abstracted_onfsm_example (line 519) | def abstracted_onfsm_example():
function faulty_coffee_machine_mdp_example (line 547) | def faulty_coffee_machine_mdp_example(automaton_type='mdp'):
function weird_coffee_machine_mdp_example (line 573) | def weird_coffee_machine_mdp_example():
function benchmark_stochastic_example (line 597) | def benchmark_stochastic_example(example, automaton_type='smm', n_c=20, ...
function custom_stochastic_example (line 640) | def custom_stochastic_example(stochastic_machine, learning_type='smm', m...
function learn_stochastic_system_and_do_model_checking (line 669) | def learn_stochastic_system_and_do_model_checking(example, automaton_typ...
function alergia_mdp_example (line 695) | def alergia_mdp_example():
function alergia_smm_example (line 725) | def alergia_smm_example():
function alergia_mc_example_with_loaded_data (line 754) | def alergia_mc_example_with_loaded_data():
function jAlergiaExample (line 800) | def jAlergiaExample():
function active_alergia_example (line 817) | def active_alergia_example(example='first_grid'):
function rpni_example (line 845) | def rpni_example():
function rpni_check_model_example (line 860) | def rpni_check_model_example():
function rpni_mealy_example (line 892) | def rpni_mealy_example():
function random_active_rpni_example (line 920) | def random_active_rpni_example():
function compare_stochastic_and_non_deterministic_learning (line 950) | def compare_stochastic_and_non_deterministic_learning(example='first_gri...
function learning_context_free_grammar_example (line 995) | def learning_context_free_grammar_example():
function arithmetic_expression_sevpa_learning (line 1021) | def arithmetic_expression_sevpa_learning():
function benchmark_sevpa_learning (line 1070) | def benchmark_sevpa_learning():
function random_sevpa_learning (line 1096) | def random_sevpa_learning():
function passive_vpa_learning_on_lists (line 1122) | def passive_vpa_learning_on_lists():
function passive_vpa_learning_arithmetics (line 1149) | def passive_vpa_learning_arithmetics():
function passive_vpa_learning_on_all_benchmark_models (line 1164) | def passive_vpa_learning_on_all_benchmark_models():
function gsm_rpni (line 1185) | def gsm_rpni():
function gsm_edsm (line 1198) | def gsm_edsm():
function gsm_likelihood_ratio (line 1220) | def gsm_likelihood_ratio():
function example_Alergia_extension (line 1254) | def example_Alergia_extension():
function gsm_IOAlergia_domain_knowldege (line 1295) | def gsm_IOAlergia_domain_knowldege():
function k_tails_example (line 1331) | def k_tails_example():
FILE: aalpy/SULs/AutomataSUL.py
class AutomatonSUL (line 5) | class AutomatonSUL(SUL):
method __init__ (line 6) | def __init__(self, automaton: Automaton):
method pre (line 10) | def pre(self):
method step (line 13) | def step(self, letter=None):
method post (line 16) | def post(self):
FILE: aalpy/SULs/PyMethodSUL.py
class FunctionDecorator (line 4) | class FunctionDecorator:
method __init__ (line 9) | def __init__(self, function, args=None):
method __repr__ (line 24) | def __repr__(self):
class PyClassSUL (line 30) | class PyClassSUL(SUL):
method __init__ (line 34) | def __init__(self, python_class):
method pre (line 44) | def pre(self):
method post (line 50) | def post(self):
method step (line 53) | def step(self, letter):
FILE: aalpy/SULs/RegexSUL.py
class RegexSUL (line 5) | class RegexSUL(SUL):
method __init__ (line 10) | def __init__(self, regex: str):
method pre (line 15) | def pre(self):
method post (line 19) | def post(self):
method step (line 23) | def step(self, letter):
FILE: aalpy/SULs/TomitaSUL.py
class TomitaSUL (line 6) | class TomitaSUL(SUL):
method __init__ (line 12) | def __init__(self, tomita_level_fun):
method pre (line 20) | def pre(self):
method post (line 24) | def post(self):
method step (line 28) | def step(self, letter):
function tomita_1 (line 37) | def tomita_1(word):
function tomita_2 (line 41) | def tomita_2(word):
function tomita_3 (line 45) | def tomita_3(word):
function not_tomita_3 (line 51) | def not_tomita_3(word):
function tomita_4 (line 55) | def tomita_4(word):
function tomita_5 (line 59) | def tomita_5(word):
function tomita_6 (line 63) | def tomita_6(word):
function tomita_7 (line 67) | def tomita_7(word):
FILE: aalpy/automata/Dfa.py
class DfaState (line 7) | class DfaState(AutomatonState, Generic[InputType]):
method __init__ (line 12) | def __init__(self, state_id, is_accepting=False):
method output (line 18) | def output(self):
class Dfa (line 21) | class Dfa(DeterministicAutomaton[DfaState[InputType]]):
method __init__ (line 26) | def __init__(self, initial_state: DfaState, states):
method step (line 29) | def step(self, letter):
method compute_characterization_set (line 43) | def compute_characterization_set(self, char_set_init=None, online_suff...
method compute_output_seq (line 49) | def compute_output_seq(self, state, sequence):
method execute_sequence (line 54) | def execute_sequence(self, origin_state, seq):
method to_state_setup (line 61) | def to_state_setup(self):
method from_state_setup (line 74) | def from_state_setup(state_setup : dict, **kwargs):
FILE: aalpy/automata/MarkovChain.py
class McState (line 8) | class McState(AutomatonState, Generic[OutputType]):
method __init__ (line 9) | def __init__(self, state_id, output):
class MarkovChain (line 16) | class MarkovChain(Automaton[McState[OutputType]]):
method __init__ (line 19) | def __init__(self, initial_state, states: list):
method reset_to_initial (line 22) | def reset_to_initial(self):
method step (line 25) | def step(self, letter=None):
method step_to (line 48) | def step_to(self, input):
method from_state_setup (line 66) | def from_state_setup(state_setup: dict, **kwargs):
method to_state_setup (line 69) | def to_state_setup(self):
FILE: aalpy/automata/Mdp.py
class MdpState (line 9) | class MdpState(AutomatonState, Generic[InputType, OutputType]):
method __init__ (line 13) | def __init__(self, state_id, output=None):
class Mdp (line 20) | class Mdp(Automaton[MdpState[InputType, OutputType]]):
method __init__ (line 23) | def __init__(self, initial_state: MdpState, states: list):
method reset_to_initial (line 26) | def reset_to_initial(self):
method step (line 29) | def step(self, letter):
method step_to (line 51) | def step_to(self, inp, out):
method to_state_setup (line 69) | def to_state_setup(self):
method from_state_setup (line 84) | def from_state_setup(state_setup: dict, **kwargs):
FILE: aalpy/automata/MealyMachine.py
class MealyState (line 7) | class MealyState(AutomatonState, Generic[InputType, OutputType]):
method __init__ (line 12) | def __init__(self, state_id):
class MealyMachine (line 18) | class MealyMachine(DeterministicAutomaton[MealyState[InputType, OutputTy...
method __init__ (line 20) | def __init__(self, initial_state: MealyState, states):
method step (line 23) | def step(self, letter):
method to_state_setup (line 39) | def to_state_setup(self):
method from_state_setup (line 52) | def from_state_setup(state_setup : dict, **kwargs):
FILE: aalpy/automata/MooreMachine.py
class MooreState (line 7) | class MooreState(AutomatonState, Generic[InputType,OutputType]):
method __init__ (line 12) | def __init__(self, state_id, output=None):
class MooreMachine (line 18) | class MooreMachine(DeterministicAutomaton[MooreState[InputType, OutputTy...
method __init__ (line 20) | def __init__(self, initial_state: AutomatonState, states: list):
method step (line 23) | def step(self, letter):
method compute_characterization_set (line 40) | def compute_characterization_set(self, char_set_init=None, online_suff...
method compute_output_seq (line 46) | def compute_output_seq(self, state, sequence):
method execute_sequence (line 51) | def execute_sequence(self, origin_state, seq):
method to_state_setup (line 57) | def to_state_setup(self):
method from_state_setup (line 70) | def from_state_setup(state_setup : dict, **kwargs):
FILE: aalpy/automata/NonDeterministicMooreMachine.py
class NDMooreState (line 9) | class NDMooreState(AutomatonState, Generic[InputType, OutputType]):
method __init__ (line 14) | def __init__(self, state_id, output=None):
class NDMooreMachine (line 20) | class NDMooreMachine(Automaton[NDMooreState[InputType, OutputType]]):
method to_state_setup (line 22) | def to_state_setup(self):
method from_state_setup (line 37) | def from_state_setup(state_setup: dict, **kwargs) -> 'NDMooreMachine':
method __init__ (line 49) | def __init__(self, initial_state: AutomatonState, states: list):
method step (line 52) | def step(self, letter):
FILE: aalpy/automata/Onfsm.py
class OnfsmState (line 9) | class OnfsmState(AutomatonState, Generic[InputType, OutputType]):
method __init__ (line 11) | def __init__(self, state_id):
method add_transition (line 17) | def add_transition(self, inp, out, new_state) :
method get_transition (line 30) | def get_transition(self, input, output=None):
class Onfsm (line 47) | class Onfsm(Automaton[OnfsmState[InputType, OutputType]]):
method __init__ (line 51) | def __init__(self, initial_state: OnfsmState, states: list):
method step (line 54) | def step(self, letter):
method outputs_on_input (line 71) | def outputs_on_input(self, letter):
method step_to (line 85) | def step_to(self, inp, out):
method from_state_setup (line 105) | def from_state_setup(state_setup : dict, **kwargs):
method to_state_setup (line 108) | def to_state_setup(self):
FILE: aalpy/automata/Sevpa.py
class SevpaAlphabet (line 8) | class SevpaAlphabet:
method __init__ (line 20) | def __init__(self, internal_alphabet: List[str], call_alphabet: List[s...
method get_merged_alphabet (line 27) | def get_merged_alphabet(self) -> List[str]:
method __str__ (line 40) | def __str__(self) -> str:
class SevpaState (line 48) | class SevpaState(AutomatonState):
method __init__ (line 53) | def __init__(self, state_id, is_accepting=False):
class SevpaTransition (line 60) | class SevpaTransition:
method __init__ (line 71) | def __init__(self, target: SevpaState, letter, action, stack_guard=None):
method __str__ (line 77) | def __str__(self):
class Sevpa (line 86) | class Sevpa(Automaton):
method __init__ (line 92) | def __init__(self, initial_state: SevpaState, states: List[SevpaState]):
method reset_to_initial (line 106) | def reset_to_initial(self):
method step (line 113) | def step(self, letter):
method get_state_by_id (line 164) | def get_state_by_id(self, state_id) -> Union[SevpaState, None]:
method is_input_complete (line 170) | def is_input_complete(self) -> bool:
method execute_sequence (line 173) | def execute_sequence(self, origin_state, seq):
method to_state_setup (line 180) | def to_state_setup(self):
method from_state_setup (line 198) | def from_state_setup(state_setup: dict, **kwargs):
method transform_access_string (line 226) | def transform_access_string(self, state=None, stack_content=None) -> L...
method create_daisy_hypothesis (line 261) | def create_daisy_hypothesis(initial_state, alphabet):
method get_input_alphabet (line 287) | def get_input_alphabet(self):
method get_error_state (line 304) | def get_error_state(self):
method delete_state (line 356) | def delete_state(self, state_to_remove):
method get_allowed_call_transitions (line 380) | def get_allowed_call_transitions(self):
method get_accepting_words_bfs (line 415) | def get_accepting_words_bfs(self, min_word_length: int = 0, num_words:...
method get_random_accepting_word (line 457) | def get_random_accepting_word(self, return_letter_prob: float = 0.5, m...
FILE: aalpy/automata/StochasticMealyMachine.py
class StochasticMealyState (line 10) | class StochasticMealyState(AutomatonState, Generic[InputType, OutputType]):
method __init__ (line 12) | def __init__(self, state_id):
class StochasticMealyMachine (line 18) | class StochasticMealyMachine(Automaton[StochasticMealyState[InputType, O...
method __init__ (line 20) | def __init__(self, initial_state: StochasticMealyState, states: list):
method reset_to_initial (line 23) | def reset_to_initial(self):
method step (line 26) | def step(self, letter):
method step_to (line 51) | def step_to(self, inp, out):
method to_mdp (line 70) | def to_mdp(self):
method to_state_setup (line 73) | def to_state_setup(self):
method from_state_setup (line 88) | def from_state_setup(state_setup : dict, **kwargs):
function smm_to_mdp_conversion (line 101) | def smm_to_mdp_conversion(smm: StochasticMealyMachine):
FILE: aalpy/automata/Vpa.py
class VpaAlphabet (line 8) | class VpaAlphabet:
method __init__ (line 20) | def __init__(self, internal_alphabet: List[str], call_alphabet: List[s...
method get_merged_alphabet (line 27) | def get_merged_alphabet(self) -> List[str]:
method __str__ (line 40) | def __str__(self) -> str:
class VpaState (line 48) | class VpaState(AutomatonState):
method __init__ (line 53) | def __init__(self, state_id, is_accepting=False):
class VpaTransition (line 59) | class VpaTransition:
method __init__ (line 71) | def __init__(self, start: VpaState, target: VpaState, symbol, action, ...
method __str__ (line 78) | def __str__(self):
class Vpa (line 82) | class Vpa(Automaton):
method __init__ (line 88) | def __init__(self, initial_state: VpaState, states):
method reset_to_initial (line 101) | def reset_to_initial(self):
method top (line 105) | def top(self):
method step (line 108) | def step(self, letter):
method to_state_setup (line 153) | def to_state_setup(self):
method get_input_alphabet (line 166) | def get_input_alphabet(self) -> VpaAlphabet:
method is_input_complete (line 182) | def is_input_complete(self) -> bool:
method from_state_setup (line 199) | def from_state_setup(state_setup: dict, **kwargs):
method is_balanced (line 251) | def is_balanced(self, seq):
method generate_random_accepting_word (line 255) | def generate_random_accepting_word(self, min_steps=4, max_steps=20):
function vpa_from_dfa_representation (line 304) | def vpa_from_dfa_representation(dfa_repr, vpa_alphabet):
FILE: aalpy/base/Automaton.py
class AutomatonState (line 8) | class AutomatonState(ABC):
method __init__ (line 10) | def __init__(self, state_id):
method get_diff_state_transitions (line 25) | def get_diff_state_transitions(self) -> list:
method get_same_state_transitions (line 35) | def get_same_state_transitions(self) -> list:
class Automaton (line 50) | class Automaton(ABC, Generic[AutomatonStateType]):
method __init__ (line 55) | def __init__(self, initial_state: AutomatonStateType, states: List[Aut...
method size (line 69) | def size(self):
method reset_to_initial (line 72) | def reset_to_initial(self):
method step (line 79) | def step(self, letter):
method is_input_complete (line 94) | def is_input_complete(self) -> bool:
method get_input_alphabet (line 111) | def get_input_alphabet(self):
method get_state_by_id (line 122) | def get_state_by_id(self, state_id) -> Union[AutomatonStateType, None]:
method __str__ (line 129) | def __str__(self):
method make_input_complete (line 136) | def make_input_complete(self, missing_transition_go_to='self_loop'):
method execute_sequence (line 145) | def execute_sequence(self, origin_state, seq):
method save (line 152) | def save(self, file_path='LearnedModel', file_type='dot'):
method visualize (line 156) | def visualize(self, path='LearnedModel', file_type='pdf', display_same...
method from_state_setup (line 162) | def from_state_setup(state_setup: dict, **kwargs) -> 'Automaton':
method to_state_setup (line 166) | def to_state_setup(self):
method copy (line 169) | def copy(self) -> 'Automaton':
method __reduce__ (line 172) | def __reduce__(self):
class DeterministicAutomaton (line 176) | class DeterministicAutomaton(Automaton[AutomatonStateType]):
method step (line 179) | def step(self, letter):
method get_shortest_path (line 182) | def get_shortest_path(self, origin_state: AutomatonStateType, target_s...
method is_strongly_connected (line 231) | def is_strongly_connected(self) -> bool:
method output_step (line 249) | def output_step(self, state, letter):
method find_distinguishing_seq (line 265) | def find_distinguishing_seq(self, state1, state2, alphabet):
method compute_output_seq (line 297) | def compute_output_seq(self, state, sequence):
method is_minimal (line 312) | def is_minimal(self):
method compute_characterization_set (line 318) | def compute_characterization_set(self, char_set_init=None,
method _split_blocks (line 413) | def _split_blocks(self, blocks, seq):
method compute_prefixes (line 433) | def compute_prefixes(self):
method minimize (line 438) | def minimize(self):
method __eq__ (line 454) | def __eq__(self, other):
FILE: aalpy/base/CacheTree.py
class Node (line 1) | class Node(object):
method __init__ (line 4) | def __init__(self, value=None):
class CacheTree (line 9) | class CacheTree:
method __init__ (line 18) | def __init__(self):
method reset (line 24) | def reset(self):
method step_in_cache (line 29) | def step_in_cache(self, inp, out):
method in_cache (line 61) | def in_cache(self, input_seq: tuple):
method add_to_cache (line 87) | def add_to_cache(self, input_sequence, output_sequence):
class CacheDict (line 96) | class CacheDict:
method __init__ (line 105) | def __init__(self):
method reset (line 109) | def reset(self):
method step_in_cache (line 113) | def step_in_cache(self, inp, out):
method in_cache (line 143) | def in_cache(self, input_seq: tuple):
method add_to_cache (line 161) | def add_to_cache(self, input_sequence, output_sequence):
method get_output_sequence (line 168) | def get_output_sequence(self, input_seq):
FILE: aalpy/base/Oracle.py
class Oracle (line 6) | class Oracle(ABC):
method __init__ (line 9) | def __init__(self, alphabet: list, sul: SUL):
method find_cex (line 25) | def find_cex(self, hypothesis):
method reset_hyp_and_sul (line 40) | def reset_hyp_and_sul(self, hypothesis):
FILE: aalpy/base/SUL.py
class SUL (line 6) | class SUL(ABC):
method __init__ (line 13) | def __init__(self):
method query (line 18) | def query(self, word: tuple) -> list:
method io_query (line 43) | def io_query(self, word : tuple):
method adaptive_query (line 46) | def adaptive_query(self, word, ads):
method pre (line 95) | def pre(self):
method post (line 102) | def post(self):
method step (line 109) | def step(self, letter):
class CacheSUL (line 125) | class CacheSUL(SUL):
method __init__ (line 131) | def __init__(self, sul: SUL, cache_type='tree'):
method query (line 136) | def query(self, word):
method pre (line 168) | def pre(self):
method post (line 175) | def post(self):
method step (line 178) | def step(self, letter):
FILE: aalpy/learning_algs/adaptive/AdaptiveLSharp.py
function run_adaptive_Lsharp (line 9) | def run_adaptive_Lsharp(alphabet: list, sul: SUL, references: list, eq_o...
FILE: aalpy/learning_algs/adaptive/AdaptiveObservationTree.py
class AdaptiveObservationTree (line 10) | class AdaptiveObservationTree(ObservationTree):
method __init__ (line 11) | def __init__(self, alphabet, sul, references, automaton_type, extensio...
method build_hypothesis (line 53) | def build_hypothesis(self):
method make_observation_tree_adequate_matching (line 72) | def make_observation_tree_adequate_matching(self):
method make_frontiers_identified_with_matching (line 100) | def make_frontiers_identified_with_matching(self):
method identify_frontier_with_matching (line 109) | def identify_frontier_with_matching(self, frontier_state):
method identify_frontier_with_identifiers (line 132) | def identify_frontier_with_identifiers(self, frontier_state, identifie...
method match_refinement (line 163) | def match_refinement(self):
method find_distinguishing_seq_partial (line 172) | def find_distinguishing_seq_partial(self, model, state1, state2, alpha...
method refine_matches_basis (line 197) | def refine_matches_basis(self, basis_state, matches):
method match_separation (line 226) | def match_separation(self):
method match_separation_frontier (line 251) | def match_separation_frontier(self, matched_states, frontier_state, ba...
method promote_frontier_state (line 279) | def promote_frontier_state(self):
method insert_observation (line 297) | def insert_observation(self, inputs, outputs):
method extend_node_and_update_matching (line 314) | def extend_node_and_update_matching(self, inputs, outputs):
method rebuild_obs_tree (line 337) | def rebuild_obs_tree(self):
method prioritized_promotion (line 356) | def prioritized_promotion(self):
method find_frontier_new_basis (line 370) | def find_frontier_new_basis(self):
method find_basis_frontier_pair (line 386) | def find_basis_frontier_pair(self, frontier_state, frontier_state_acce...
method insert_observation_rebuilding (line 414) | def insert_observation_rebuilding(self, inputs, outputs):
method apart_from_all (line 432) | def apart_from_all(self, frontier_state):
method add_ref_transitions_to_states (line 443) | def add_ref_transitions_to_states(self, reference, reference_id):
method compute_prefix_map (line 464) | def compute_prefix_map(self, reference, reference_id):
method compute_characterization_map (line 473) | def compute_characterization_map(self, reference, states):
method get_combined_model (line 482) | def get_combined_model(self):
FILE: aalpy/learning_algs/adaptive/StateMatching.py
class StateMatching (line 4) | class StateMatching:
method __init__ (line 5) | def __init__(self, alphabet, combined_model):
method add_entry_basis (line 17) | def add_entry_basis(self, basis_state):
method update_best_score (line 21) | def update_best_score(self, basis_state):
method update_score (line 25) | def update_score(self, ob_tree, basis_state, reference_state, basis_st...
method update_best_match (line 29) | def update_best_match(self, basis_state, score):
method initialize_matching (line 32) | def initialize_matching(self, ob_tree):
method update_matching (line 49) | def update_matching(self, to_recalc, split, ob_tree):
method update_matching_basis (line 81) | def update_matching_basis(self, basis_state, ob_tree):
method find_longest_words (line 97) | def find_longest_words(self, current_state, ob_tree, all_seqs):
method validate_reference_input (line 115) | def validate_reference_input(self, inputs, reference_state):
method is_prefix_of (line 124) | def is_prefix_of(self, str1, str2):
method find_longest_common_part (line 133) | def find_longest_common_part(self, str1, str2):
method print_match_table (line 143) | def print_match_table(self, ob_tree):
class TotalStateMatching (line 167) | class TotalStateMatching(StateMatching):
method __init__ (line 175) | def __init__(self, alphabet, combined_model):
method add_entry_basis (line 178) | def add_entry_basis(self, basis_state, aut_type):
method update_best_score (line 195) | def update_best_score(self, basis_state):
method update_best_match (line 201) | def update_best_match(self, basis_state, score):
method update_score (line 213) | def update_score(self, ob_tree, basis_state, reference_state, basis_st...
method update_score_mealy (line 221) | def update_score_mealy(self, ob_tree, basis_state, reference_state, ba...
method update_score_moore (line 248) | def update_score_moore(self, ob_tree, basis_state, reference_state, ba...
class ApproximateStateMatching (line 285) | class ApproximateStateMatching(StateMatching):
method __init__ (line 293) | def __init__(self, alphabet, combined_model):
method add_entry_basis (line 297) | def add_entry_basis(self, basis_state, aut_type):
method get_score (line 315) | def get_score(self, basis_state, ref_state):
method update_best_score (line 321) | def update_best_score(self, basis_state):
method update_best_match (line 328) | def update_best_match(self, basis_state, score):
method update_score (line 340) | def update_score(self, ob_tree, basis_state, reference_state, basis_st...
method update_score_mealy (line 347) | def update_score_mealy(self, ob_tree, basis_state, reference_state, ba...
method update_score_moore (line 371) | def update_score_moore(self, ob_tree, basis_state, reference_state, ba...
FILE: aalpy/learning_algs/deterministic/ADS.py
class AdsNode (line 4) | class AdsNode:
method __init__ (line 7) | def __init__(self, input_val=None, children=None, score=0):
method create_leaf (line 13) | def create_leaf():
method get_input (line 16) | def get_input(self):
method get_child_node (line 19) | def get_child_node(self, output):
method get_score (line 24) | def get_score(self):
class Ads (line 28) | class Ads:
method __init__ (line 29) | def __init__(self, ob_tree, current_block):
method get_score (line 33) | def get_score(self):
method construct_ads (line 36) | def construct_ads(self, ob_tree, current_block):
method construct_ads_rec (line 62) | def construct_ads_rec(self, ob_tree, current_block):
method compute_output_subtree (line 107) | def compute_output_subtree(self, ob_tree, partition, u_i):
method compute_score (line 113) | def compute_score(self, u_io, u_i, child_score):
method partition_on_output_empty (line 117) | def partition_on_output_empty(self, block, automaton_type):
method partition_on_output (line 126) | def partition_on_output(self, block, input_val, automaton_type):
method next_input (line 144) | def next_input(self, prev_output):
method maximal_base_input (line 153) | def maximal_base_input(self, alphabet, block, automaton_type):
method reset_to_root (line 175) | def reset_to_root(self):
FILE: aalpy/learning_algs/deterministic/Apartness.py
class Apartness (line 4) | class Apartness:
method compute_witness (line 6) | def compute_witness(state1, state2, ob_tree):
method states_are_apart (line 19) | def states_are_apart(state1, state2, ob_tree):
method _show_states_are_apart_mealy (line 27) | def _show_states_are_apart_mealy(first, second, alphabet):
method _show_states_are_apart_moore (line 47) | def _show_states_are_apart_moore(first, second, alphabet):
method compute_witness_in_tree_and_hypothesis_states (line 66) | def compute_witness_in_tree_and_hypothesis_states(ob_tree, ob_tree_sta...
method compute_witness_in_tree_and_hypothesis_states_mealy (line 76) | def compute_witness_in_tree_and_hypothesis_states_mealy(ob_tree, ob_tr...
method compute_witness_in_tree_and_hypothesis_states_moore (line 100) | def compute_witness_in_tree_and_hypothesis_states_moore(ob_tree, ob_tr...
FILE: aalpy/learning_algs/deterministic/ClassificationTree.py
class CTNode (line 14) | class CTNode:
method __init__ (line 17) | def __init__(self, parent, path_to_node):
method is_leaf (line 21) | def is_leaf(self):
class CTInternalNode (line 25) | class CTInternalNode(CTNode):
method __init__ (line 28) | def __init__(self, distinguishing_string: tuple, parent, path_to_node):
method is_leaf (line 33) | def is_leaf(self):
class CTLeafNode (line 37) | class CTLeafNode(CTNode):
method __init__ (line 40) | def __init__(self, access_string: tuple, parent, path_to_node):
method __repr__ (line 44) | def __repr__(self):
method output (line 48) | def output(self):
method is_leaf (line 58) | def is_leaf(self):
class ClassificationTree (line 62) | class ClassificationTree:
method __init__ (line 63) | def __init__(self, alphabet: Union[list, SevpaAlphabet], sul: SUL, aut...
method _sift (line 111) | def _sift(self, word):
method update_hypothesis (line 148) | def update_hypothesis(self):
method _least_common_ancestor (line 251) | def _least_common_ancestor(self, node_1_id, node_2_id):
method update (line 293) | def update(self, cex: tuple, hypothesis):
method process_counterexample (line 335) | def process_counterexample(self, cex: tuple, hypothesis, cex_processin...
method _insert_new_leaf (line 403) | def _insert_new_leaf(self, discriminator, old_leaf_access_string, new_...
FILE: aalpy/learning_algs/deterministic/CounterExampleProcessing.py
function counterexample_successfully_processed (line 5) | def counterexample_successfully_processed(sul, cex, hypothesis):
function longest_prefix_cex_processing (line 11) | def longest_prefix_cex_processing(s_union_s_dot_a: list, cex: tuple, clo...
function rs_cex_processing (line 46) | def rs_cex_processing(sul: SUL, cex: tuple, hypothesis, suffix_closednes...
function linear_cex_processing (line 113) | def linear_cex_processing(sul: SUL, cex: tuple, hypothesis, suffix_close...
function exponential_cex_processing (line 160) | def exponential_cex_processing(sul: SUL, cex: tuple, hypothesis, suffix_...
FILE: aalpy/learning_algs/deterministic/KV.py
function run_KV (line 17) | def run_KV(alphabet: Union[list, SevpaAlphabet], sul: SUL, eq_oracle: Or...
FILE: aalpy/learning_algs/deterministic/LSharp.py
function run_Lsharp (line 9) | def run_Lsharp(alphabet: list, sul: SUL, eq_oracle: Oracle, automaton_type,
FILE: aalpy/learning_algs/deterministic/LStar.py
function run_Lstar (line 16) | def run_Lstar(alphabet: list, sul: SUL, eq_oracle: Oracle, automaton_typ...
FILE: aalpy/learning_algs/deterministic/ObservationTable.py
class ObservationTable (line 10) | class ObservationTable:
method __init__ (line 11) | def __init__(self, alphabet: list, sul: SUL, automaton_type, prefixes_...
method get_rows_to_close (line 49) | def get_rows_to_close(self, closing_strategy='longest_first'):
method get_causes_of_inconsistency (line 94) | def get_causes_of_inconsistency(self):
method s_dot_a (line 116) | def s_dot_a(self):
method update_obs_table (line 126) | def update_obs_table(self, s_set: list = None, e_set: list = None):
method gen_hypothesis (line 156) | def gen_hypothesis(self, no_cex_processing_used=False) -> Automaton:
method _get_row_representatives (line 213) | def _get_row_representatives(self):
FILE: aalpy/learning_algs/deterministic/ObservationTree.py
class MooreNode (line 7) | class MooreNode:
method __init__ (line 11) | def __init__(self, parent=None):
method __hash__ (line 19) | def __hash__(self):
method add_successor (line 22) | def add_successor(self, input_val, output_val, successor_node):
method get_successor (line 27) | def get_successor(self, input_val):
method extend_and_get (line 33) | def extend_and_get(self, inp, output):
method id_counter (line 43) | def id_counter(self):
class MealyNode (line 47) | class MealyNode:
method __init__ (line 51) | def __init__(self, parent=None):
method __hash__ (line 58) | def __hash__(self):
method add_successor (line 61) | def add_successor(self, input_val, output_val, successor_node):
method get_successor (line 65) | def get_successor(self, input_val):
method get_output (line 71) | def get_output(self, input_val):
method extend_and_get (line 77) | def extend_and_get(self, inp, output):
method id_counter (line 91) | def id_counter(self):
class ObservationTree (line 95) | class ObservationTree:
method __init__ (line 96) | def __init__(self, alphabet, sul, automaton_type, extension_rule, sepa...
method insert_observation (line 125) | def insert_observation(self, inputs, outputs):
method get_observation (line 134) | def get_observation(self, inputs):
method get_outputs (line 150) | def get_outputs(self, basis_state, inputs):
method get_successor (line 167) | def get_successor(self, inputs):
method get_transfer_sequence (line 178) | def get_transfer_sequence(self, from_node, to_node):
method get_access_sequence (line 192) | def get_access_sequence(self, to_node):
method get_size (line 206) | def get_size(self):
method update_frontier_and_basis (line 210) | def update_frontier_and_basis(self):
method update_basis_candidates (line 217) | def update_basis_candidates(self, frontier_state):
method update_frontier_to_basis_dict (line 231) | def update_frontier_to_basis_dict(self):
method promote_frontier_state (line 242) | def promote_frontier_state(self):
method check_frontier_consistency (line 257) | def check_frontier_consistency(self):
method is_observation_tree_adequate (line 272) | def is_observation_tree_adequate(self):
method make_basis_complete (line 291) | def make_basis_complete(self):
method find_basis_candidates (line 301) | def find_basis_candidates(self, new_frontier):
method explore_frontier (line 307) | def explore_frontier(self, basis_state, inp):
method adaptive_output_query (line 331) | def adaptive_output_query(self, prefix, infix, ads):
method adaptive_output_query_base (line 336) | def adaptive_output_query_base(self, prefix, ads):
method _answer_ads_from_tree (line 359) | def _answer_ads_from_tree(self, ads, from_node):
method get_or_compute_witness (line 390) | def get_or_compute_witness(self, state_one, state_two):
method make_frontiers_identified (line 407) | def make_frontiers_identified(self):
method identify_frontier (line 412) | def identify_frontier(self, frontier_state):
method _identify_frontier_sepseq (line 434) | def _identify_frontier_sepseq(self, frontier_state):
method _identify_frontier_ads (line 448) | def _identify_frontier_ads(self, frontier_state):
method construct_hypothesis_states (line 455) | def construct_hypothesis_states(self):
method construct_hypothesis_transitions (line 472) | def construct_hypothesis_transitions(self):
method construct_hypothesis (line 495) | def construct_hypothesis(self):
method build_hypothesis (line 508) | def build_hypothesis(self):
method make_observation_tree_adequate (line 521) | def make_observation_tree_adequate(self):
method process_counter_example (line 531) | def process_counter_example(self, hypothesis, cex_inputs, cex_outputs):
method _get_counter_example_prefix_index (line 544) | def _get_counter_example_prefix_index(self, cex_outputs, hyp_outputs):
method _process_binary_search (line 551) | def _process_binary_search(self, hypothesis, cex_inputs, cex_outputs):
method _get_automaton_successor (line 604) | def _get_automaton_successor(self, automaton, from_state, inputs):
FILE: aalpy/learning_algs/deterministic_passive/ClassicRPNI.py
class ClassicRPNI (line 7) | class ClassicRPNI:
method __init__ (line 8) | def __init__(self, data, automaton_type, print_info=True):
method run_rpni (line 20) | def run_rpni(self):
method _compatible (line 56) | def _compatible(self, root_node):
method _merge (line 65) | def _merge(self, red_node, lex_min_blue, copy_nodes=False):
method _fold (line 89) | def _fold(self, red_node, blue_node):
method _fold_mealy (line 99) | def _fold_mealy(self, red_node, blue_node):
FILE: aalpy/learning_algs/deterministic_passive/GsmRPNI.py
class GsmRPNI (line 7) | class GsmRPNI:
method __init__ (line 8) | def __init__(self, data, automaton_type, print_info=True):
method run_rpni (line 21) | def run_rpni(self):
method _partition_from_merge (line 69) | def _partition_from_merge(self, red: RpniNode, blue: RpniNode):
FILE: aalpy/learning_algs/deterministic_passive/PAPNI.py
function run_PAPNI (line 4) | def run_PAPNI(data, vpa_alphabet, algorithm='edsm', print_info=True):
FILE: aalpy/learning_algs/deterministic_passive/RPNI.py
function run_RPNI (line 8) | def run_RPNI(data, automaton_type, algorithm='gsm',
FILE: aalpy/learning_algs/deterministic_passive/active_RPNI.py
class RpniActiveSampler (line 8) | class RpniActiveSampler(ABC):
method sample (line 14) | def sample(self, sul, model):
class RandomWordSampler (line 31) | class RandomWordSampler(RpniActiveSampler):
method __init__ (line 32) | def __init__(self, num_walks, min_walk_len, max_walk_len):
method sample (line 37) | def sample(self, sul, model):
function run_active_RPNI (line 52) | def run_active_RPNI(data, sul, sampler, n_iter, automaton_type, print_in...
FILE: aalpy/learning_algs/deterministic_passive/rpni_helper_functions.py
class RpniNode (line 6) | class RpniNode:
method __init__ (line 9) | def __init__(self, output=None, children=None, automaton_type='moore'):
method shallow_copy (line 19) | def shallow_copy(self):
method copy (line 23) | def copy(self):
method __lt__ (line 26) | def __lt__(self, other):
method __eq__ (line 30) | def __eq__(self, other):
method __hash__ (line 33) | def __hash__(self):
method compatible_outputs (line 36) | def compatible_outputs(self, other):
method get_child_by_prefix (line 51) | def get_child_by_prefix(self, prefix):
function check_sequence (line 58) | def check_sequence(root_node, seq, automaton_type):
function createPTA (line 81) | def createPTA(data, automaton_type):
function extract_unique_sequences (line 108) | def extract_unique_sequences(root_node, automaton_type):
function to_automaton (line 138) | def to_automaton(red, automaton_type):
function visualize_pta (line 172) | def visualize_pta(root_node, path='pta.pdf'):
FILE: aalpy/learning_algs/general_passive/GeneralizedStateMerging.py
class Partitioning (line 13) | class Partitioning:
method __init__ (line 14) | def __init__(self, red: GsmNode, blue: GsmNode):
class Instrumentation (line 22) | class Instrumentation:
method __init__ (line 23) | def __init__(self):
method reset (line 26) | def reset(self, gsm: 'GeneralizedStateMerging'):
method pta_construction_done (line 29) | def pta_construction_done(self, root: GsmNode):
method log_promote (line 32) | def log_promote(self, node: GsmNode):
method log_merge (line 35) | def log_merge(self, part: Partitioning):
method learning_done (line 38) | def learning_done(self, root: GsmNode):
class GeneralizedStateMerging (line 42) | class GeneralizedStateMerging:
method __init__ (line 43) | def __init__(self, *,
method compute_local_compatibility (line 85) | def compute_local_compatibility(self, a: GsmNode, b: GsmNode):
method run (line 94) | def run(self, data, convert=True, instrumentation: Instrumentation=Non...
method _check_futures (line 202) | def _check_futures(self, red: GsmNode, blue: GsmNode) -> bool:
method _partition_from_merge (line 223) | def _partition_from_merge(self, red: GsmNode, blue: GsmNode) -> Partit...
function run_GSM (line 304) | def run_GSM(data: list, *,
FILE: aalpy/learning_algs/general_passive/GsmAlgorithms.py
function run_EDSM (line 12) | def run_EDSM(data, automaton_type, input_completeness=None, print_info=T...
function run_k_tails (line 63) | def run_k_tails(data, automaton_type, k, input_completeness=None, print_...
function run_Alergia_EDSM (line 116) | def run_Alergia_EDSM(data, automaton_type, eps=0.05, print_info=False):
FILE: aalpy/learning_algs/general_passive/GsmNode.py
function intersection_iterator (line 36) | def intersection_iterator(a: Dict[Key, Val], b: Dict[Key, Val]) -> Itera...
function union_iterator (line 45) | def union_iterator(a: Dict[Key, Val], b: Dict[Key, Val], default: Val = ...
function detect_data_format (line 57) | def detect_data_format(data, check_consistency=False, guess=False):
class TransitionInfo (line 101) | class TransitionInfo:
method __init__ (line 104) | def __init__(self, target, count, original_target, original_count):
class GsmNode (line 113) | class GsmNode:
method __init__ (line 125) | def __init__(self, prefix_access_pair, predecessor: 'GsmNode' = None):
method __lt__ (line 131) | def __lt__(self, other, compare_length_only=False):
method get_prefix_length (line 146) | def get_prefix_length(self):
method get_prefix_output (line 154) | def get_prefix_output(self):
method get_prefix_input (line 157) | def get_prefix_input(self):
method resolve_unknown_prefix_output (line 160) | def resolve_unknown_prefix_output(self, value):
method get_prefix (line 164) | def get_prefix(self, include_output=True):
method get_root (line 176) | def get_root(self):
method get_or_create_transitions (line 182) | def get_or_create_transitions(self, in_sym) -> Dict[Any, TransitionInfo]:
method transition_iterator (line 189) | def transition_iterator(self) -> Iterable[Tuple[Any, Any, TransitionIn...
method shallow_copy (line 194) | def shallow_copy(self) -> 'GsmNode':
method get_by_prefix (line 203) | def get_by_prefix(self, seq: IOTrace) -> Optional['GsmNode']:
method get_all_nodes (line 217) | def get_all_nodes(self) -> List['GsmNode']:
method is_tree (line 228) | def is_tree(self):
method to_automaton (line 241) | def to_automaton(self, output_behavior: OutputBehavior, transition_beh...
method visualize (line 305) | def visualize(self, path: Union[str, pathlib.Path], output_behavior: O...
method make_input_complete (line 378) | def make_input_complete(self) -> List[Tuple['GsmNode', Any, Any]]:
method add_trace (line 392) | def add_trace(self, trace: IOTrace):
method add_labeled_sequence (line 406) | def add_labeled_sequence(self, example: IOExample):
method createPTA (line 440) | def createPTA(data, output_behavior, data_format=None) -> 'GsmNode':
method is_locally_deterministic (line 465) | def is_locally_deterministic(self):
method is_deterministic (line 468) | def is_deterministic(self):
method deterministic_compatible (line 471) | def deterministic_compatible(self, other: 'GsmNode'):
method is_moore (line 479) | def is_moore(self):
method moore_compatible (line 487) | def moore_compatible(self, other: 'GsmNode'):
method local_log_likelihood_contribution (line 492) | def local_log_likelihood_contribution(self):
method count (line 503) | def count(self):
FILE: aalpy/learning_algs/general_passive/Instrumentation.py
class ProgressReport (line 9) | class ProgressReport(Instrumentation):
method __init__ (line 10) | def __init__(self, lvl):
method reset (line 26) | def reset(self, gsm: GeneralizedStateMerging):
method pta_construction_done (line 37) | def pta_construction_done(self, root):
method print_status (line 51) | def print_status(self):
method log_promote (line 58) | def log_promote(self, node: GsmNode):
method log_merge (line 63) | def log_merge(self, part: Partitioning):
method learning_done (line 69) | def learning_done(self, root: GsmNode):
class MergeViolationDebugger (line 79) | class MergeViolationDebugger(Instrumentation):
method __init__ (line 80) | def __init__(self, ground_truth: GsmNode):
method reset (line 87) | def reset(self, gsm: GeneralizedStateMerging):
method log_promote (line 92) | def log_promote(self, new_red: GsmNode):
method log_merge (line 106) | def log_merge(self, part: Partitioning):
FILE: aalpy/learning_algs/general_passive/ScoreFunctionsGSM.py
class ScoreCalculation (line 11) | class ScoreCalculation:
method __init__ (line 12) | def __init__(self, local_compatibility: LocalCompatibilityFunction = N...
method reset (line 22) | def reset(self):
method default_local_compatibility (line 26) | def default_local_compatibility(a: GsmNode, b: GsmNode):
method default_score_function (line 30) | def default_score_function(part: Dict[GsmNode, GsmNode]):
method has_score_function (line 33) | def has_score_function(self):
method has_local_compatibility (line 36) | def has_local_compatibility(self):
function hoeffding_compatibility (line 40) | def hoeffding_compatibility(eps, compare_original=True) -> LocalCompatib...
class ScoreWithKTail (line 63) | class ScoreWithKTail(ScoreCalculation):
method __init__ (line 66) | def __init__(self, other_score: ScoreCalculation, k: int):
method reset (line 73) | def reset(self):
method local_compatibility (line 77) | def local_compatibility(self, a: GsmNode, b: GsmNode):
class ScoreWithSinks (line 88) | class ScoreWithSinks(ScoreCalculation):
method __init__ (line 91) | def __init__(self, other_score: ScoreCalculation, sink_cond: Callable[...
method reset (line 99) | def reset(self):
method local_compatibility (line 103) | def local_compatibility(self, a: GsmNode, b: GsmNode):
class ScoreCombinator (line 114) | class ScoreCombinator(ScoreCalculation):
method __init__ (line 120) | def __init__(self, scores: List[ScoreCalculation], aggregate_compatibi...
method reset (line 127) | def reset(self):
method local_compatibility (line 131) | def local_compatibility(self, a: GsmNode, b: GsmNode):
method score_function (line 134) | def score_function(self, part: Dict[GsmNode, GsmNode]):
method default_aggregate_compatibility (line 138) | def default_aggregate_compatibility(compatibility_iterable):
method default_aggregate_score (line 147) | def default_aggregate_score(score_iterable):
function local_to_global_compatibility (line 151) | def local_to_global_compatibility(local_fun: LocalCompatibilityFunction)...
function differential_info (line 168) | def differential_info(part: Dict[GsmNode, GsmNode]):
function transform_score (line 181) | def transform_score(score, transform: Callable):
function make_greedy (line 190) | def make_greedy(score):
function lower_threshold (line 194) | def lower_threshold(score, thresh):
function AIC_score (line 198) | def AIC_score(alpha=0) -> ScoreFunction:
function EDSM_frequency_score (line 206) | def EDSM_frequency_score(min_evidence=-1) -> ScoreFunction:
function EDSM_score (line 219) | def EDSM_score(min_evidence=-1) -> ScoreFunction:
FILE: aalpy/learning_algs/non_deterministic/AbstractedOnfsmLstar.py
function run_abstracted_ONFSM_Lstar (line 11) | def run_abstracted_ONFSM_Lstar(alphabet: list, sul: SUL, eq_oracle: Orac...
FILE: aalpy/learning_algs/non_deterministic/AbstractedOnfsmObservationTable.py
class AbstractedNonDetObservationTable (line 9) | class AbstractedNonDetObservationTable:
method __init__ (line 10) | def __init__(self, alphabet: list, sul: NonDeterministicSULWrapper, ab...
method update_obs_table (line 38) | def update_obs_table(self, s_set=None, e_set: list = None):
method abstract_obs_table (line 56) | def abstract_obs_table(self):
method add_to_T (line 80) | def add_to_T(self, s, e, value):
method get_all_outputs (line 98) | def get_all_outputs(self, s, e):
method update_extended_S (line 103) | def update_extended_S(self, row_prefix=None):
method get_row_to_close (line 115) | def get_row_to_close(self):
method get_row_to_complete (line 137) | def get_row_to_complete(self):
method get_row_to_make_consistent (line 172) | def get_row_to_make_consistent(self):
method get_distinctive_input_sequence (line 209) | def get_distinctive_input_sequence(self, first_row, second_row, inp):
method update_E (line 230) | def update_E(self, seq):
method clean_obs_table (line 234) | def clean_obs_table(self):
method row_to_hashable (line 265) | def row_to_hashable(self, row_prefix):
method gen_hypothesis (line 285) | def gen_hypothesis(self) -> Onfsm:
method extend_S_dot_A (line 333) | def extend_S_dot_A(self, cex_prefixes: list):
method get_abstraction (line 353) | def get_abstraction(self, out):
method cex_processing (line 367) | def cex_processing(self, cex: tuple, hypothesis: Onfsm):
method clean_tables (line 415) | def clean_tables(self):
FILE: aalpy/learning_algs/non_deterministic/NonDeterministicSULWrapper.py
class NonDeterministicSULWrapper (line 5) | class NonDeterministicSULWrapper(SUL):
method __init__ (line 10) | def __init__(self, sul: SUL):
method pre (line 15) | def pre(self):
method post (line 19) | def post(self):
method step (line 22) | def step(self, letter):
FILE: aalpy/learning_algs/non_deterministic/OnfsmLstar.py
function run_non_det_Lstar (line 14) | def run_non_det_Lstar(alphabet: list, sul: SUL, eq_oracle: Oracle, n_sam...
function counterexample_not_valid (line 142) | def counterexample_not_valid(hypothesis, cex):
FILE: aalpy/learning_algs/non_deterministic/OnfsmObservationTable.py
class NonDetObservationTable (line 7) | class NonDetObservationTable:
method __init__ (line 9) | def __init__(self, alphabet: list, sul: NonDeterministicSULWrapper, n_...
method get_row_to_close (line 43) | def get_row_to_close(self):
method get_extended_S (line 68) | def get_extended_S(self, row_prefix=None):
method query_missing_observations (line 92) | def query_missing_observations(self, s=None, e=None):
method row_to_hashable (line 101) | def row_to_hashable(self, row_prefix):
method clean_obs_table (line 127) | def clean_obs_table(self):
method gen_hypothesis (line 154) | def gen_hypothesis(self, stochastic=False):
FILE: aalpy/learning_algs/non_deterministic/TraceTree.py
class Node (line 4) | class Node:
method __init__ (line 7) | def __init__(self, output):
method get_child (line 15) | def get_child(self, inp, out):
method get_prefix (line 26) | def get_prefix(self):
class TraceTree (line 35) | class TraceTree:
method __init__ (line 40) | def __init__(self):
method reset (line 44) | def reset(self):
method add_to_tree (line 47) | def add_to_tree(self, inp, out):
method add_trace (line 66) | def add_trace(self, inputs, outputs):
method get_to_node (line 71) | def get_to_node(self, inputs, outputs):
method get_all_traces (line 92) | def get_all_traces(self, prefix, e=None):
method get_table (line 128) | def get_table(self, s, e):
method find_cex_in_cache (line 148) | def find_cex_in_cache(self, hypothesis):
method get_s_e_sampling_frequency (line 171) | def get_s_e_sampling_frequency(self, prefix, suffix):
method get_sampling_distributions (line 192) | def get_sampling_distributions(self, prefix, input_from_alphabet):
FILE: aalpy/learning_algs/stochastic/DifferenceChecker.py
class DifferenceChecker (line 27) | class DifferenceChecker(ABC):
method are_cells_different (line 30) | def are_cells_different(self, c1: dict, c2: dict, **kwargs) -> bool:
method difference_value (line 33) | def difference_value(self, c1: dict, c2: dict):
method use_diff_value (line 36) | def use_diff_value(self):
class HoeffdingChecker (line 40) | class HoeffdingChecker(DifferenceChecker):
method __init__ (line 42) | def __init__(self, alpha=0.05):
method are_cells_different (line 45) | def are_cells_different(self, c1: dict, c2: dict, **kwargs) -> bool:
function compute_epsilon (line 60) | def compute_epsilon(alpha1, n1):
class AdvancedHoeffdingChecker (line 65) | class AdvancedHoeffdingChecker(DifferenceChecker):
method __init__ (line 66) | def __init__(self, alpha=0.05, use_diff=False):
method are_cells_different (line 70) | def are_cells_different(self, c1: dict, c2: dict, **kwargs) -> bool:
method use_diff_value (line 87) | def use_diff_value(self):
method difference_value (line 90) | def difference_value(self, c1_out_freq: dict, c2_out_freq: dict):
class ChiSquareChecker (line 111) | class ChiSquareChecker(DifferenceChecker):
method __init__ (line 113) | def __init__(self, alpha=0.001, use_diff_value=False):
method are_cells_different (line 121) | def are_cells_different(self, c1_out_freq: dict, c2_out_freq: dict, **...
method use_diff_value (line 144) | def use_diff_value(self):
method difference_value (line 147) | def difference_value(self, c1_out_freq: dict, c2_out_freq: dict):
method compute_Q (line 162) | def compute_Q(self, c1_out_freq, c2_out_freq, keys):
FILE: aalpy/learning_algs/stochastic/SamplingBasedObservationTable.py
class SamplingBasedObservationTable (line 9) | class SamplingBasedObservationTable:
method __init__ (line 10) | def __init__(self, input_alphabet: list, automaton_type, teacher: Stoc...
method refine_not_completed_cells (line 53) | def refine_not_completed_cells(self, n_resample, uniform=False):
method update_obs_table_with_freq_obs (line 114) | def update_obs_table_with_freq_obs(self, element_of_s=None):
method get_extended_s (line 136) | def get_extended_s(self, element_of_s=None):
method make_closed_and_consistent (line 157) | def make_closed_and_consistent(self):
method get_row_to_close (line 186) | def get_row_to_close(self):
method get_consistency_violation (line 204) | def get_consistency_violation(self, ignore=None):
method get_representative (line 235) | def get_representative(self, target):
method trim_columns (line 278) | def trim_columns(self):
method trim (line 307) | def trim(self, hypothesis):
method stop (line 361) | def stop(self, learning_round, chaos_cex_present, cex, stopping_range_...
method get_unamb_percentage (line 420) | def get_unamb_percentage(self):
method are_cells_incompatible (line 434) | def are_cells_incompatible(self, s1, s2, e):
method are_rows_compatible (line 460) | def are_rows_compatible(self, s1, s2, e_ignore=None):
method update_compatibility_classes (line 485) | def update_compatibility_classes(self):
method chaos_counterexample (line 523) | def chaos_counterexample(self, hypothesis):
method add_to_PTA (line 553) | def add_to_PTA(self, pta_root, trace, uncertainty_value=None):
method generate_hypothesis (line 582) | def generate_hypothesis(self):
FILE: aalpy/learning_algs/stochastic/StochasticCexProcessing.py
function stochastic_longest_prefix (line 5) | def stochastic_longest_prefix(cex, prefixes):
function stochastic_rs (line 45) | def stochastic_rs(sul: SUL, cex: tuple, hypothesis):
FILE: aalpy/learning_algs/stochastic/StochasticLStar.py
function run_stochastic_Lstar (line 24) | def run_stochastic_Lstar(input_alphabet, sul: SUL, eq_oracle: Oracle, ta...
FILE: aalpy/learning_algs/stochastic/StochasticTeacher.py
class StochasticSUL (line 8) | class StochasticSUL(SUL):
method __init__ (line 9) | def __init__(self, sul, teacher):
method pre (line 14) | def pre(self):
method post (line 19) | def post(self):
method step (line 22) | def step(self, letter):
class Node (line 29) | class Node:
method __init__ (line 34) | def __init__(self, output):
method get_child (line 40) | def get_child(self, inp, out):
method get_frequency_sum (line 57) | def get_frequency_sum(self, input_letter):
method get_output_frequencies (line 65) | def get_output_frequencies(self, input_letter):
class StochasticTeacher (line 81) | class StochasticTeacher:
method __init__ (line 87) | def __init__(self, sul: SUL, n_c, eq_oracle, automaton_type, compatibi...
method back_to_root (line 111) | def back_to_root(self):
method add (line 114) | def add(self, inp, out):
method frequency_query (line 133) | def frequency_query(self, s: tuple, e: tuple):
method complete_query (line 166) | def complete_query(self, s: tuple, e: tuple):
method tree_query (line 211) | def tree_query(self, pta_root):
method single_dfs_for_cex (line 267) | def single_dfs_for_cex(self, stop_prob, hypothesis):
method dfs_for_cex_in_tree (line 306) | def dfs_for_cex_in_tree(self, hypothesis, nr_traces, stop_prob):
method bfs_for_cex_in_tree (line 313) | def bfs_for_cex_in_tree(self, hypothesis):
method equivalence_query (line 344) | def equivalence_query(self, hypothesis):
method is_cex_processed (line 383) | def is_cex_processed(self, hypothesis, cex):
FILE: aalpy/learning_algs/stochastic_passive/ActiveAleriga.py
class Sampler (line 7) | class Sampler(ABC):
method sample (line 13) | def sample(self, sul, model):
class RandomWordSampler (line 30) | class RandomWordSampler(Sampler):
method __init__ (line 31) | def __init__(self, num_walks, min_walk_len, max_walk_len):
method sample (line 36) | def sample(self, sul, model):
function run_active_Alergia (line 55) | def run_active_Alergia(data, sul, sampler, n_iter, eps=0.05, compatibili...
FILE: aalpy/learning_algs/stochastic_passive/Alergia.py
class Alergia (line 13) | class Alergia:
method __init__ (line 14) | def __init__(self, data, automaton_type, eps=0.05, compatibility_check...
method compatibility_test (line 33) | def compatibility_test(self, a, b):
method merge (line 54) | def merge(self, red_state, blue_state):
method fold (line 64) | def fold(self, red, blue):
method run (line 73) | def run(self):
method normalize (line 115) | def normalize(self, red):
method to_automaton (line 128) | def to_automaton(self, red):
function run_Alergia (line 165) | def run_Alergia(data, automaton_type, eps=0.05, compatibility_checker=No...
function run_JAlergia (line 199) | def run_JAlergia(path_to_data_file, automaton_type, path_to_jAlergia_jar...
FILE: aalpy/learning_algs/stochastic_passive/CompatibilityChecker.py
class CompatibilityChecker (line 7) | class CompatibilityChecker(ABC):
method are_states_different (line 10) | def are_states_different(self, a: AlergiaPtaNode, b: AlergiaPtaNode, *...
class HoeffdingCompatibility (line 14) | class HoeffdingCompatibility(CompatibilityChecker):
method __init__ (line 15) | def __init__(self, eps):
method hoeffding_bound (line 19) | def hoeffding_bound(self, a: dict, b: dict):
method are_states_different (line 36) | def are_states_different(self, a: AlergiaPtaNode, b: AlergiaPtaNode, *...
FILE: aalpy/learning_algs/stochastic_passive/FPTA.py
class AlergiaPtaNode (line 5) | class AlergiaPtaNode:
method __init__ (line 9) | def __init__(self, output, prefix):
method successors (line 22) | def successors(self):
method get_inputs (line 25) | def get_inputs(self):
method get_input_frequency (line 28) | def get_input_frequency(self, target_input):
method get_output_frequencies (line 31) | def get_output_frequencies(self, target_input):
method get_immutable_inputs (line 34) | def get_immutable_inputs(self):
method get_immutable_input_frequency (line 37) | def get_immutable_input_frequency(self, target_input):
method get_original_output_frequencies (line 40) | def get_original_output_frequencies(self, target_input):
method __lt__ (line 43) | def __lt__(self, other):
method __le__ (line 46) | def __le__(self, other):
method __eq__ (line 49) | def __eq__(self, other):
function create_fpta (line 53) | def create_fpta(data, automaton_type):
FILE: aalpy/oracles/BreadthFirstExplorationEqOracle.py
class BreadthFirstExplorationEqOracle (line 8) | class BreadthFirstExplorationEqOracle(Oracle):
method __init__ (line 14) | def __init__(self, alphabet, sul: SUL, depth=5):
method find_cex (line 35) | def find_cex(self, hypothesis):
FILE: aalpy/oracles/CacheBasedEqOracle.py
class CacheBasedEqOracle (line 7) | class CacheBasedEqOracle(Oracle):
method __init__ (line 14) | def __init__(self, alphabet: list, sul: SUL, num_walks=100, depth_incr...
method find_cex (line 38) | def find_cex(self, hypothesis):
method get_paths (line 78) | def get_paths(self, t, paths=None, current_path=None):
FILE: aalpy/oracles/KWayStateCoverageEqOracle.py
class KWayStateCoverageEqOracle (line 7) | class KWayStateCoverageEqOracle(Oracle):
method __init__ (line 13) | def __init__(self, alphabet: list, sul: SUL, k=2, random_walk_len=20,
method find_cex (line 40) | def find_cex(self, hypothesis):
FILE: aalpy/oracles/KWayTransitionCoverageEqOracle.py
class KWayTransitionCoverageEqOracle (line 11) | class KWayTransitionCoverageEqOracle(Oracle):
method __init__ (line 18) | def __init__(self, alphabet: list, sul: SUL, k: int = 2, method='random',
method find_cex (line 64) | def find_cex(self, hypothesis: Automaton):
method greedy_set_cover (line 114) | def greedy_set_cover(self, hypothesis: Automaton, paths: list):
method select_optimal_path (line 138) | def select_optimal_path(self, covered: set, paths: list) -> Path:
method generate_random_paths (line 150) | def generate_random_paths(self, hypothesis: Automaton) -> list:
method generate_prefix_steps (line 161) | def generate_prefix_steps(self, hypothesis: Automaton):
method create_path (line 167) | def create_path(self, hypothesis: Automaton, steps: tuple) -> Path:
method check_path (line 193) | def check_path(self, hypothesis: Automaton, steps: tuple):
FILE: aalpy/oracles/PacOracle.py
class PacOracle (line 7) | class PacOracle(Oracle):
method __init__ (line 16) | def __init__(self, alphabet: list, sul: SUL, epsilon=0.01, delta=0.01,...
method find_cex (line 25) | def find_cex(self, hypothesis):
FILE: aalpy/oracles/PerfectKnowledgeEqOracle.py
class PerfectKnowledgeEqOracle (line 5) | class PerfectKnowledgeEqOracle(Oracle):
method __init__ (line 10) | def __init__(self, alphabet: list, sul: SUL, model_under_learning: Det...
method find_cex (line 14) | def find_cex(self, hypothesis):
FILE: aalpy/oracles/ProvidedSequencesOracleWrapper.py
class ProvidedSequencesOracleWrapper (line 4) | class ProvidedSequencesOracleWrapper(Oracle):
method __init__ (line 5) | def __init__(self, alphabet: list, sul: SUL, oracle: Oracle, provided_...
method find_cex (line 21) | def find_cex(self, hypothesis):
FILE: aalpy/oracles/RandomWalkEqOracle.py
class RandomWalkEqOracle (line 9) | class RandomWalkEqOracle(Oracle):
method __init__ (line 15) | def __init__(self, alphabet: list, sul: SUL, num_steps=5000, reset_aft...
method find_cex (line 38) | def find_cex(self, hypothesis):
method reset_counter (line 88) | def reset_counter(self):
FILE: aalpy/oracles/RandomWordEqOracle.py
class RandomWordEqOracle (line 10) | class RandomWordEqOracle(Oracle):
method __init__ (line 15) | def __init__(self, alphabet: list, sul: SUL, num_walks=500, min_walk_l...
method find_cex (line 43) | def find_cex(self, hypothesis):
method reset_counter (line 97) | def reset_counter(self):
FILE: aalpy/oracles/StatePrefixEqOracle.py
class StatePrefixEqOracle (line 7) | class StatePrefixEqOracle(Oracle):
method __init__ (line 15) | def __init__(self, alphabet: list, sul: SUL, walks_per_state=25, walk_...
method find_cex (line 40) | def find_cex(self, hypothesis):
FILE: aalpy/oracles/TransitionFocusOracle.py
class TransitionFocusOracle (line 7) | class TransitionFocusOracle(Oracle):
method __init__ (line 14) | def __init__(self, alphabet, sul: SUL, num_random_walks=500, walk_len=...
method find_cex (line 29) | def find_cex(self, hypothesis):
FILE: aalpy/oracles/UserInputEqOracle.py
class UserInputEqOracle (line 5) | class UserInputEqOracle(Oracle):
method __init__ (line 25) | def __init__(self, alphabet: list, sul: SUL):
method find_cex (line 29) | def find_cex(self, hypothesis):
FILE: aalpy/oracles/WMethodEqOracle.py
class WMethodEqOracle (line 8) | class WMethodEqOracle(Oracle):
method __init__ (line 14) | def __init__(self, alphabet: list, sul: SUL, max_number_of_states):
method test_suite (line 27) | def test_suite(self, cover, depth, char_set):
method find_cex (line 47) | def find_cex(self, hypothesis):
class RandomWMethodEqOracle (line 81) | class RandomWMethodEqOracle(Oracle):
method __init__ (line 88) | def __init__(self, alphabet: list, sul: SUL, walks_per_state=25, walk_...
method find_cex (line 106) | def find_cex(self, hypothesis):
FILE: aalpy/oracles/WpMethodEqOracle.py
function state_characterization_set (line 7) | def state_characterization_set(hypothesis, alphabet, state):
function first_phase_it (line 25) | def first_phase_it(alphabet, state_cover, depth, char_set):
function second_phase_it (line 43) | def second_phase_it(hyp, alphabet, difference, depth):
class WpMethodEqOracle (line 66) | class WpMethodEqOracle(Oracle):
method __init__ (line 71) | def __init__(self, alphabet: list, sul: SUL, max_number_of_states=4):
method find_cex (line 76) | def find_cex(self, hypothesis):
class RandomWpMethodEqOracle (line 116) | class RandomWpMethodEqOracle(Oracle):
method __init__ (line 126) | def __init__(
method find_cex (line 133) | def find_cex(self, hypothesis):
FILE: aalpy/utils/AutomatonGenerators.py
function generate_random_deterministic_automata (line 9) | def generate_random_deterministic_automata(automaton_type,
function generate_random_mealy_machine (line 146) | def generate_random_mealy_machine(num_states, input_alphabet, output_alp...
function generate_random_moore_machine (line 175) | def generate_random_moore_machine(num_states, input_alphabet, output_alp...
function generate_random_dfa (line 204) | def generate_random_dfa(num_states, alphabet, num_accepting_states=1,
function generate_random_mdp (line 236) | def generate_random_mdp(num_states, input_size, output_size, possible_pr...
function generate_random_smm (line 299) | def generate_random_smm(num_states, input_size, output_size, possible_pr...
function generate_random_ONFSM (line 360) | def generate_random_ONFSM(num_states, num_inputs, num_outputs, multiple_...
function generate_random_markov_chain (line 404) | def generate_random_markov_chain(num_states):
function _has_transition (line 429) | def _has_transition(state: SevpaState, transition_letter, stack_guard) -...
function generate_random_sevpa (line 444) | def generate_random_sevpa(num_states, internal_alphabet_size, call_alpha...
FILE: aalpy/utils/BenchmarkSULs.py
function get_Angluin_dfa (line 1) | def get_Angluin_dfa():
function get_benchmark_ONFSM (line 14) | def get_benchmark_ONFSM():
function get_ONFSM (line 43) | def get_ONFSM():
function get_faulty_coffee_machine_MDP (line 89) | def get_faulty_coffee_machine_MDP():
function get_weird_coffee_machine_MDP (line 109) | def get_weird_coffee_machine_MDP():
function get_faulty_coffee_machine_SMM (line 157) | def get_faulty_coffee_machine_SMM():
function get_minimal_faulty_coffee_machine_SMM (line 177) | def get_minimal_faulty_coffee_machine_SMM():
function get_faulty_mqtt_SMM (line 194) | def get_faulty_mqtt_SMM():
function get_small_gridworld (line 226) | def get_small_gridworld():
class MockMqttExample (line 276) | class MockMqttExample:
method __init__ (line 278) | def __init__(self):
method subscribe (line 282) | def subscribe(self, topic: str):
method unsubscribe (line 292) | def unsubscribe(self, topic):
method connect (line 303) | def connect(self):
method disconnect (line 311) | def disconnect(self):
method publish (line 316) | def publish(self, topic):
class DateValidator (line 328) | class DateValidator:
method is_date_accepted (line 335) | def is_date_accepted(self, date_string: str):
function get_small_pomdp (line 361) | def get_small_pomdp():
function is_balanced (line 390) | def is_balanced(test_string, call_return_map, allow_empty_string):
function get_balanced_string_sul (line 411) | def get_balanced_string_sul(call_return_map, allow_empty_string):
FILE: aalpy/utils/BenchmarkSevpaModels.py
function sevpa_for_L1 (line 5) | def sevpa_for_L1():
function sevpa_for_L2 (line 15) | def sevpa_for_L2():
function sevpa_for_L3 (line 28) | def sevpa_for_L3():
function sevpa_for_L4 (line 53) | def sevpa_for_L4():
function sevpa_for_L5 (line 65) | def sevpa_for_L5():
function sevpa_for_L7 (line 80) | def sevpa_for_L7():
function sevpa_for_L8 (line 98) | def sevpa_for_L8():
function sevpa_for_L9 (line 118) | def sevpa_for_L9():
function sevpa_for_L10 (line 143) | def sevpa_for_L10():
function sevpa_for_L11 (line 174) | def sevpa_for_L11():
function sevpa_for_L12 (line 207) | def sevpa_for_L12():
function sevpa_for_L13 (line 220) | def sevpa_for_L13():
function sevpa_for_L14 (line 239) | def sevpa_for_L14():
function sevpa_for_L15 (line 262) | def sevpa_for_L15():
FILE: aalpy/utils/BenchmarkVpaModels.py
function vpa_L1 (line 6) | def vpa_L1():
function vpa_L2 (line 24) | def vpa_L2():
function vpa_for_L3 (line 43) | def vpa_for_L3():
function vpa_L3 (line 72) | def vpa_L3():
function vpa_L4 (line 93) | def vpa_L4():
function vpa_L6 (line 116) | def vpa_L6():
function vpa_L8 (line 142) | def vpa_L8():
function vpa_L9 (line 171) | def vpa_L9():
function vpa_L10 (line 197) | def vpa_L10():
function vpa_L11 (line 223) | def vpa_L11():
function vpa_L12 (line 252) | def vpa_L12():
function vpa_for_L16 (line 280) | def vpa_for_L16():
function vpa_for_odd_parentheses (line 300) | def vpa_for_odd_parentheses():
function vpa_for_even_parentheses (line 327) | def vpa_for_even_parentheses():
function gen_arithmetic_data (line 353) | def gen_arithmetic_data(num_sequences=3000, min_seq_len=2, max_seq_len=8):
function vpa_json (line 404) | def vpa_json():
function get_all_VPAs (line 460) | def get_all_VPAs():
FILE: aalpy/utils/DataHandler.py
class DataHandler (line 4) | class DataHandler(ABC):
method tokenize_data (line 11) | def tokenize_data(self, path):
class CharacterTokenizer (line 15) | class CharacterTokenizer(DataHandler):
method tokenize_data (line 22) | def tokenize_data(self, path):
class DelimiterTokenizer (line 30) | class DelimiterTokenizer(DataHandler):
method tokenize_data (line 37) | def tokenize_data(self, path, delimiter=','):
class IODelimiterTokenizer (line 45) | class IODelimiterTokenizer(DataHandler):
method tokenize_data (line 53) | def tokenize_data(self, path, io_delimiter='/', word_delimiter=','):
function try_int (line 70) | def try_int(x):
FILE: aalpy/utils/FileHandler.py
function _wrap_label (line 18) | def _wrap_label(label):
function _get_node (line 27) | def _get_node(state, automaton_type):
function _add_transition_to_graph (line 51) | def _add_transition_to_graph(graph, state, automaton_type, display_same_...
function visualize_automaton (line 129) | def visualize_automaton(automaton, path="LearnedModel", file_type="pdf",...
function save_automaton_to_file (line 157) | def save_automaton_to_file(automaton, path="LearnedModel", file_type="dot",
function _process_label (line 227) | def _process_label(label, source, destination, automaton_type):
function _process_node_label (line 291) | def _process_node_label(node, label, node_label_dict, node_type, automat...
function _strip_label (line 308) | def _strip_label(label: str) -> str:
function _process_node_label_prime (line 318) | def _process_node_label_prime(node_name, label, line, node_label_dict, n...
function load_automaton_from_file (line 344) | def load_automaton_from_file(path, automaton_type, compute_prefixes=False):
FILE: aalpy/utils/HelperFunctions.py
function extend_set (line 9) | def extend_set(list_to_extend: list, new_elements: list) -> list:
function all_prefixes (line 26) | def all_prefixes(li):
function all_suffixes (line 40) | def all_suffixes(li):
function profile_function (line 54) | def profile_function(function: callable, sort_key='cumtime'):
function random_string_generator (line 72) | def random_string_generator(size=10, chars=string.ascii_lowercase + stri...
function print_learning_info (line 88) | def print_learning_info(info: dict):
function print_observation_table (line 115) | def print_observation_table(ot, table_type):
function is_suffix_of (line 174) | def is_suffix_of(suffix, trace) -> bool:
function get_cex_prefixes (line 191) | def get_cex_prefixes(cex, automaton_type):
function get_available_oracles_and_err_msg (line 208) | def get_available_oracles_and_err_msg():
function make_input_complete (line 220) | def make_input_complete(automaton, missing_transition_go_to='self_loop'):
function convert_i_o_traces_for_RPNI (line 282) | def convert_i_o_traces_for_RPNI(sequences, automaton_type="mealy"):
function visualize_classification_tree (line 312) | def visualize_classification_tree(root_node):
function is_balanced (line 339) | def is_balanced(input_seq, vpa_alphabet):
function generate_input_output_data_from_automata (line 351) | def generate_input_output_data_from_automata(model, num_sequences=4000, ...
function generate_input_output_data_from_vpa (line 374) | def generate_input_output_data_from_vpa(vpa, num_sequences=1000, max_seq...
function product_with_possible_empty_iterable (line 414) | def product_with_possible_empty_iterable(*iterables, repeat=1):
function dfa_from_moore (line 422) | def dfa_from_moore(moore_model: MooreMachine) -> Dfa:
function mc_from_mdp (line 442) | def mc_from_mdp(mdp: Mdp, input_symbol=None) -> MarkovChain:
function mc_format_to_mdp (line 459) | def mc_format_to_mdp(data):
FILE: aalpy/utils/ModelChecking.py
function get_properties_file (line 18) | def get_properties_file(exp_name):
function get_correct_prop_values (line 31) | def get_correct_prop_values(exp_name):
function _target_string (line 53) | def _target_string(target, orig_id_to_int_id):
function _sanitize_for_prism (line 60) | def _sanitize_for_prism(symbol):
function mdp_2_prism_format (line 67) | def mdp_2_prism_format(mdp: Mdp, name: str, output_path=None):
function evaluate_all_properties (line 126) | def evaluate_all_properties(prism_file_name, properties_file_name):
function model_check_properties (line 151) | def model_check_properties(model: Mdp, properties: str):
function model_check_experiment (line 175) | def model_check_experiment(path_to_properties, correct_prop_values, mdp,...
function stop_based_on_confidence (line 201) | def stop_based_on_confidence(hypothesis, property_based_stopping, print_...
function bisimilar (line 238) | def bisimilar(a1: DeterministicAutomaton, a2: DeterministicAutomaton, re...
function compare_automata (line 287) | def compare_automata(aut_1: DeterministicAutomaton, aut_2: Deterministic...
class TestCaseWrapperSUL (line 348) | class TestCaseWrapperSUL(SUL):
method __init__ (line 349) | def __init__(self, sul):
method pre (line 356) | def pre(self):
method post (line 361) | def post(self):
method step (line 366) | def step(self, letter):
function generate_test_cases (line 373) | def generate_test_cases(automaton: DeterministicAutomaton, oracle):
function statistical_model_checking (line 400) | def statistical_model_checking(model, goals, max_num_steps, num_tests=10...
FILE: aalpy/utils/Sampling.py
function get_io_traces (line 7) | def get_io_traces(automaton: Automaton, input_traces: list) -> list:
function get_labeled_sequences (line 21) | def get_labeled_sequences(automaton: Automaton, input_traces: list) -> l...
function get_data_from_input_sequence (line 37) | def get_data_from_input_sequence(automaton: Automaton, input_sequence: l...
function support_automaton_arg (line 46) | def support_automaton_arg(require_transform):
function sample_with_length_limits (line 66) | def sample_with_length_limits(alphabet, nr_samples, min_len, max_len):
function sample_with_term_prob (line 71) | def sample_with_term_prob(alphabet, nr_samples, term_prob):
function get_complete_sample (line 82) | def get_complete_sample(automaton: DeterministicAutomaton):
FILE: tests/oracles/test_baseOracle.py
class BaseOracleTests (line 9) | class BaseOracleTests(unittest.TestCase):
method generate_dfa_suls (line 14) | def generate_dfa_suls(self, number_of_states=10, alphabet_size=10, num...
method test_validate_eq_oracle (line 35) | def test_validate_eq_oracle(self, alphabet, eq_oracle, learning_sul, v...
FILE: tests/oracles/test_kWayTransitionCoverageEqOracle.py
class KWayTransitionCoverageEqOracleTests (line 7) | class KWayTransitionCoverageEqOracleTests(BaseOracleTests):
method test_default (line 9) | def test_default(self):
method test_k_4 (line 15) | def test_k_4(self):
method test_method_prefix (line 22) | def test_method_prefix(self):
method test_max_number_of_steps_10 (line 30) | def test_max_number_of_steps_10(self):
method test_default_large_dfa (line 37) | def test_default_large_dfa(self):
FILE: tests/test_charSet.py
class TestCharSet (line 7) | class TestCharSet(unittest.TestCase):
method get_test_automata (line 9) | def get_test_automata(self):
method test_can_differentiate (line 20) | def test_can_differentiate(self):
method test_suffix_closed (line 47) | def test_suffix_closed(self):
FILE: tests/test_deterministic.py
class DeterministicTest (line 17) | class DeterministicTest(unittest.TestCase):
method prove_equivalence (line 19) | def prove_equivalence(self, learned_automaton):
method test_closing_strategies (line 30) | def test_closing_strategies(self):
method test_suffix_closedness (line 53) | def test_suffix_closedness(self):
method test_cex_processing (line 78) | def test_cex_processing(self):
method test_eq_oracles (line 100) | def test_eq_oracles(self):
method test_all_configuration_combinations (line 145) | def test_all_configuration_combinations(self):
FILE: tests/test_deterministic_passive.py
class DeterministicPassiveTest (line 16) | class DeterministicPassiveTest(unittest.TestCase):
method prove_equivalence (line 18) | def prove_equivalence(self, learned_automaton):
method generate_data (line 29) | def generate_data(self, ground_truth, depth=5, step=1):
method test_all_configuration_combinations (line 43) | def test_all_configuration_combinations(self):
method test_all_configuration_combinations_input_incomplete_data (line 68) | def test_all_configuration_combinations_input_incomplete_data(self):
FILE: tests/test_file_operations.py
class TestFileHandler (line 7) | class TestFileHandler(unittest.TestCase):
method test_saving_loading (line 9) | def test_saving_loading(self):
FILE: tests/test_non_deterministic.py
class NonDeterministicTest (line 4) | class NonDeterministicTest(unittest.TestCase):
method test_non_det (line 6) | def test_non_det(self):
FILE: tests/test_rwpmethod_oracle.py
class TestRandomWpMethodOracle (line 25) | class TestRandomWpMethodOracle(unittest.TestCase):
method gen_moore_from_state_setup (line 27) | def gen_moore_from_state_setup(state_setup) -> MooreMachine:
method generate_real_automata (line 49) | def generate_real_automata(self) -> MooreMachine:
method generate_hypothesis (line 63) | def generate_hypothesis(self) -> MooreMachine:
method test_rwpmethod_oracle (line 75) | def test_rwpmethod_oracle(self):
method test_rwpmethod_oracle_with_lstar (line 91) | def test_rwpmethod_oracle_with_lstar(self):
FILE: tests/test_stochastic.py
class StochasticTest (line 10) | class StochasticTest(unittest.TestCase):
method test_learning_based_on_accuracy_based_stopping (line 12) | def test_learning_based_on_accuracy_based_stopping(self):
FILE: tests/test_wmethod_oracle.py
class TestWMethodOracle (line 25) | class TestWMethodOracle(unittest.TestCase):
method gen_moore_from_state_setup (line 27) | def gen_moore_from_state_setup(state_setup) -> MooreMachine:
method generate_real_automata (line 49) | def generate_real_automata(self) -> MooreMachine:
method generate_hypothesis (line 63) | def generate_hypothesis(self) -> MooreMachine:
method test_wmethod_oracle (line 75) | def test_wmethod_oracle(self):
method test_wmethod_oracle_with_lstar (line 89) | def test_wmethod_oracle_with_lstar(self):
FILE: tests/test_wpmethod_oracle.py
class TestWpMethodOracle (line 25) | class TestWpMethodOracle(unittest.TestCase):
method gen_moore_from_state_setup (line 27) | def gen_moore_from_state_setup(state_setup) -> MooreMachine:
method generate_real_automata (line 49) | def generate_real_automata(self) -> MooreMachine:
method generate_hypothesis (line 63) | def generate_hypothesis(self) -> MooreMachine:
method test_wpmethod_oracle (line 75) | def test_wpmethod_oracle(self):
method test_wpmethod_oracle_with_lstar (line 91) | def test_wpmethod_oracle_with_lstar(self):
FILE: tests/tests_imports.py
class ImportTest (line 4) | class ImportTest(unittest.TestCase):
method test_imports (line 6) | def test_imports(self):
Condensed preview — 225 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (5,946K chars).
[
{
"path": ".gitattributes",
"chars": 48,
"preview": "* linguist-vendored\n*.py linguist-vendored=false"
},
{
"path": ".github/workflows/codeql-analysis.yml",
"chars": 2437,
"preview": "# For most projects, this workflow file will not need changing; you simply need\n# to commit it to your repository.\n#\n# Y"
},
{
"path": ".github/workflows/python-app.yml",
"chars": 1099,
"preview": "# This workflow will install Python dependencies, run tests and lint with a single version of Python\n# For more informat"
},
{
"path": ".gitignore",
"chars": 1864,
"preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\nmain.py\nLearnedModel.pdf"
},
{
"path": "Benchmarking/Benchmark_ErrorStop.py",
"chars": 7676,
"preview": "import random\nimport os\n\nimport aalpy.paths\n\nfrom aalpy.SULs import AutomatonSUL\nfrom aalpy.learning_algs import run_sto"
},
{
"path": "Benchmarking/CompleteStochasticBenchmarking.py",
"chars": 6127,
"preview": "import random\nimport time\n\nimport aalpy.paths\n\nfrom aalpy.SULs import AutomatonSUL\nfrom aalpy.learning_algs import run_s"
},
{
"path": "Benchmarking/StochasticAlgComparison.py",
"chars": 4049,
"preview": "import random\nimport time\nfrom statistics import mean\n\nimport aalpy.paths\n\nfrom aalpy.SULs import AutomatonSUL\nfrom aalp"
},
{
"path": "Benchmarking/StochasticBenchmarkingWPrism.py",
"chars": 5596,
"preview": "import random\n\nimport aalpy.paths\n\nfrom aalpy.SULs import AutomatonSUL\nfrom aalpy.learning_algs import run_stochastic_Ls"
},
{
"path": "Benchmarking/StopWithErorrRate.py",
"chars": 3241,
"preview": "import pickle\nimport random\nimport time\nfrom collections import defaultdict\nfrom statistics import mean\n\nimport aalpy.pa"
},
{
"path": "Benchmarking/benchmark.py",
"chars": 1455,
"preview": "import os\nfrom statistics import mean\n\nfrom aalpy.SULs import AutomatonSUL\nfrom aalpy.learning_algs import run_Lstar\nfro"
},
{
"path": "Benchmarking/benchmark_alphabet_increase.py",
"chars": 3039,
"preview": "from statistics import mean\nimport csv\n\nfrom aalpy.SULs import AutomatonSUL\nfrom aalpy.learning_algs import run_Lstar\nfr"
},
{
"path": "Benchmarking/benchmark_size_increase.py",
"chars": 3611,
"preview": "from statistics import mean\nimport csv\n\nfrom aalpy.SULs import AutomatonSUL\nfrom aalpy.learning_algs import run_Lstar\nfr"
},
{
"path": "Benchmarking/cex_processing_benchmark.py",
"chars": 2459,
"preview": "from collections import defaultdict\nfrom statistics import mean, stdev\n\nfrom aalpy.learning_algs import run_KV, run_Lsta"
},
{
"path": "Benchmarking/compare_lstar_and_kv.py",
"chars": 2410,
"preview": "from aalpy.SULs import AutomatonSUL\nfrom aalpy.learning_algs import run_Lstar, run_KV\nfrom aalpy.oracles import RandomWo"
},
{
"path": "Benchmarking/error_benchmark_statistics.py",
"chars": 3876,
"preview": "import csv\nimport os\nfrom collections import defaultdict\nfrom statistics import mean\n\ndirectory = 'FM_mdp_smm_error_base"
},
{
"path": "Benchmarking/evaluate_l_star_configurations.py",
"chars": 8090,
"preview": "import pickle\nfrom collections import defaultdict\nfrom random import seed\nfrom statistics import mean\n\nfrom aalpy.SULs i"
},
{
"path": "Benchmarking/fm_benchmark.py",
"chars": 5205,
"preview": "import csv\nimport os\nfrom collections import defaultdict\nfrom statistics import mean\n\n# directory = 'FM_mdp_smm/benchmar"
},
{
"path": "Benchmarking/fm_plots.py",
"chars": 3475,
"preview": "\n\ndef plot_error():\n import matplotlib.pyplot as plt\n import matplotlib\n\n matplotlib.use(\"pgf\")\n matplotlib."
},
{
"path": "Benchmarking/generate_plots.py",
"chars": 13258,
"preview": "from random import random, randint\n\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport csv\n\ndef plot_increasi"
},
{
"path": "Benchmarking/json_lbt.py",
"chars": 12583,
"preview": "import json\nimport random\nfrom collections import defaultdict\n\nfrom aalpy import run_PAPNI, load_automaton_from_file\nfro"
},
{
"path": "Benchmarking/papni_vs_rpni_benchmarking.py",
"chars": 9626,
"preview": "import pickle\nfrom collections import defaultdict\nfrom random import shuffle\n\nfrom aalpy import run_RPNI, run_PAPNI, Aut"
},
{
"path": "Benchmarking/passive_mdp_vs_smm.py",
"chars": 2799,
"preview": "import random\n\nimport aalpy.paths\nfrom aalpy.SULs import AutomatonSUL\nfrom aalpy.automata.StochasticMealyMachine import "
},
{
"path": "Benchmarking/prism_eval_props/bluetooth.props",
"chars": 388,
"preview": "Pmax=? [ F<5 (\"crash\") ]\n\nPmax=? [ F<8 (\"crash\") ]\n\nPmax=? [ F<11 (\"crash\") ]\n\nPmax=? [ F<14 (\"crash\") ]\n\nPmax=? [ F<17 "
},
{
"path": "Benchmarking/prism_eval_props/emqtt_two_client.props",
"chars": 199,
"preview": "Pmax=? [ F<5 (\"c2_Pub_c2_my_topic_bye\") ]\n\nPmax=? [ F<5 (\"c1_crash\") ]\n\nPmax=? [ F<11 (\"c1_crash\") ]\n\nPmax=? [ F<17 (\"c1"
},
{
"path": "Benchmarking/prism_eval_props/first_eval.props",
"chars": 97,
"preview": "Pmax=?[F<12(\"goal\")]\n\nPmax=? [ !(\"grass\") U<=14 (\"goal\") ]\n\nPmax=? [ !(\"sand\") U<=16 (\"goal\") ]\n\n"
},
{
"path": "Benchmarking/prism_eval_props/second_eval.props",
"chars": 117,
"preview": "Pmax=?[F<15(\"goal\")]\n\nPmax=?[F<13(\"goal\")]\n\nPmax=? [ !(\"mud\") U<=18 (\"goal\") ]\n\nPmax=? [ !(\"sand\") U<=20 (\"goal\") ]\n\n"
},
{
"path": "Benchmarking/prism_eval_props/shared_coin_eval.props",
"chars": 332,
"preview": "Pmax=?[F (\"finished\" & \"c1_heads\" & \"c2_tails\")]\n\nPmax=?[F (\"finished\" & \"c1_tails\" & \"c2_tails\")]\n\nPmax=?[!\"five\" U \"fi"
},
{
"path": "Benchmarking/prism_eval_props/slot_machine_eval.props",
"chars": 195,
"preview": "Pmax=? [ F (\"Pr10\") ]\n\nPmax=? [ F (\"Pr2\") ]\n\nPmax=? [ F (\"Pr0\") ]\n\nPmax=? [ X (X (\"r220\")) ]\n\nPmax=? [ X(X (X (\"r122\")))"
},
{
"path": "Benchmarking/prism_eval_props/tcp_eval.props",
"chars": 107,
"preview": "\nPmax=? [ F<5 (\"crash\") ]\n\nPmax=? [ F<11 (\"crash\") ]\n\nPmax=? [ F<17 (\"crash\") ]\n\nPmax=? [ F<23 (\"crash\") ]\n"
},
{
"path": "Benchmarking/rpni_papni_memory_footrpint.py",
"chars": 3867,
"preview": "import sys\nfrom random import randint, random\nimport matplotlib.pyplot as plt\n\n# Data\nimport tikzplotlib\n\nfrom Benchmark"
},
{
"path": "Benchmarking/stochastic_benchmarking/Benchmark_ErrorStop.py",
"chars": 7643,
"preview": "import random\nimport os\n\nimport aalpy.paths\n\nfrom aalpy.SULs import AutomatonSUL\nfrom aalpy.learning_algs import run_sto"
},
{
"path": "Benchmarking/stochastic_benchmarking/CompleteStochasticBenchmarking.py",
"chars": 6137,
"preview": "import random\nimport time\n\nimport aalpy.paths\n\nfrom aalpy.SULs import AutomatonSUL\nfrom aalpy.learning_algs import run_s"
},
{
"path": "Benchmarking/stochastic_benchmarking/StochasticBenchmarkingWPrism.py",
"chars": 5403,
"preview": "import random\n\nimport aalpy.paths\n\nfrom aalpy.SULs import AutomatonSUL\nfrom aalpy.learning_algs import run_stochastic_Ls"
},
{
"path": "Benchmarking/stochastic_benchmarking/passive_mdp_vs_smm.py",
"chars": 2846,
"preview": "import random\nimport os\nimport aalpy.paths\nfrom aalpy.SULs import AutomatonSUL\nfrom aalpy.automata.StochasticMealyMachin"
},
{
"path": "Benchmarking/stochastic_benchmarking/plot_error_steps.py",
"chars": 4176,
"preview": "import matplotlib.pyplot as plt\n\nfrom aalpy.utils import load_automaton_from_file\nfrom aalpy.utils import statistical_mo"
},
{
"path": "Benchmarking/stochastic_benchmarking/stochastic_benchmark_random_automata.py",
"chars": 3592,
"preview": "from itertools import product\n\nfrom aalpy.SULs import AutomatonSUL\nfrom aalpy.learning_algs import run_stochastic_Lstar\n"
},
{
"path": "Benchmarking/stochastic_benchmarking/strategy_comp.py",
"chars": 4626,
"preview": "from statistics import mean\n\nfrom aalpy.learning_algs.stochastic.DifferenceChecker import AdvancedHoeffdingChecker, Hoef"
},
{
"path": "Benchmarking/stochastic_benchmarking/unamb_error_plot.py",
"chars": 50389,
"preview": "# max_err\n# acv_err\n# unamb\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nmqtt_max_err_1 = [100, 100, 100, 100, 100"
},
{
"path": "Benchmarking/unamb_error_plot.py",
"chars": 50389,
"preview": "# max_err\n# acv_err\n# unamb\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nmqtt_max_err_1 = [100, 100, 100, 100, 100"
},
{
"path": "Benchmarking/vpa_benchmarking/benchmark_vpa.py",
"chars": 11016,
"preview": "from collections import defaultdict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\n\nfrom aalpy.SULs.A"
},
{
"path": "DotModels/Angluin_Mealy.dot",
"chars": 355,
"preview": "digraph Angluin_Mealy {\ns0 [label=\"s0\"];\ns1 [label=\"s1\"];\ns2 [label=\"s2\"];\ns3 [label=\"s3\"];\ns0 -> s2 [label=\"a/0\"];\ns0 "
},
{
"path": "DotModels/Angluin_Moore.dot",
"chars": 447,
"preview": "digraph Angluin_Moore {\ns0 [label=\"s0|1\", shape=record, style=rounded];\ns1 [label=\"s1|0\", shape=record, style=rounded];\n"
},
{
"path": "DotModels/Bluetooth/CC2640R2-no-feature-req.dot",
"chars": 5753,
"preview": "digraph \"CC2640R2-no-feature\" {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2];\ns3 [label=s3];\ns4 [label=s4];\ns5 [label=s5]"
},
{
"path": "DotModels/Bluetooth/CC2640R2-no-feature-req_stochastic.dot",
"chars": 21408,
"preview": "digraph \"CC2640R2-no-feature-req-stochastic\" {\ns0 [label=init];\ns1 [label=\"BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_REQ\"];\ns2 "
},
{
"path": "DotModels/Bluetooth/CC2640R2-no-pairing-req.dot",
"chars": 3044,
"preview": "digraph CC2640R2 {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2];\ns3 [label=s3];\ns4 [label=s4];\ns5 [label=s5];\ns0 -> s0 ["
},
{
"path": "DotModels/Bluetooth/CC2650.dot",
"chars": 2823,
"preview": "digraph \"cc2650\" {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2];\ns3 [label=s3];\ns4 [label=s4];\ns0 -> s0 [label=\"scan_req"
},
{
"path": "DotModels/Bluetooth/CYBLE-416045-02.dot",
"chars": 1629,
"preview": "digraph \"cyble-416045-02\" {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2];\ns0 -> s0 [label=\"scan_req/Adv\"];\ns0 -> s1 [la"
},
{
"path": "DotModels/Bluetooth/CYBLE-416045-02_Crash_No_Response_stochastic.dot",
"chars": 9870,
"preview": "digraph \"CYBLE-416045-02_Crash_No_Response_stochastic\" {\n0 [label=___start___];\ns00 [label=E];\ns01 [label=Adv];\ns02 [lab"
},
{
"path": "DotModels/Bluetooth/CYW43455.dot",
"chars": 8669,
"preview": "digraph cyw43455 {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2];\ns3 [label=s3];\ns4 [label=s4];\ns5 [label=s5];\ns6 [label=s"
},
{
"path": "DotModels/Bluetooth/CYW43455_stochastic.dot",
"chars": 42886,
"preview": "digraph CYW43455_stochastic {\n0 [label=___start___];\ns00 [label=\"BTLE|BTLE_CTRL|BTLE_DATA|LL_LENGTH_RSP\"];\ns01 [label=E]"
},
{
"path": "DotModels/Bluetooth/bluetooth_model.dot",
"chars": 859,
"preview": "digraph LearnedModel {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2];\ns0 -> s0 [label=\"i0/o0\"];\ns0 -> s1 [label=\"i1/o1\"]"
},
{
"path": "DotModels/Bluetooth/bluetooth_reduced.dot",
"chars": 1677,
"preview": "digraph LearnedModel {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2];\ns0 -> s0 [label=\"scan_req/Adv\"];\ns0 -> s1 [label=\""
},
{
"path": "DotModels/Bluetooth/cc2652r1.dot",
"chars": 2112,
"preview": "digraph \"CC2652R1\" {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2];\ns3 [label=s3];\ns0 -> s0 [label=\"length_req/BTLE|BTLE_"
},
{
"path": "DotModels/Bluetooth/convert_to_stochastic.py",
"chars": 4737,
"preview": "import random\n\nfrom aalpy.SULs import AutomatonSUL\nfrom aalpy.base import SUL\nfrom aalpy.automata import Mdp, MdpState, "
},
{
"path": "DotModels/Bluetooth/nRF52832.dot",
"chars": 2932,
"preview": "digraph \"nRF52832\" {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2];\ns3 [label=s3];\ns4 [label=s4];\ns0 -> s0 [label=\"scan_r"
},
{
"path": "DotModels/MDPs/bluetooth.dot",
"chars": 37166,
"preview": "digraph bluetooth {\n0 [label=\"___start___\"];\ns00 [label=\"BTLE_BTLE_CTRL_BTLE_DATA_LL_REJECT_IND\"];\ns01 [label=\"crash\"];\n"
},
{
"path": "DotModels/MDPs/faulty_car_alarm.dot",
"chars": 1070,
"preview": "digraph faulty_car_alarm {\nq1_locked_closed [label=\"N\"];\nq2_locked_open [label=\"A\"];\nq3_locked_closed [label=\"A\"];\nq5_un"
},
{
"path": "DotModels/MDPs/first_grid.dot",
"chars": 7122,
"preview": "digraph first_grid {\n28 [label=\"mud\"];\n17 [label=\"grass\"];\n25 [label=\"wall\"];\n24 [label=\"concrete\"];\n13 [label=\"wall\"];\n"
},
{
"path": "DotModels/MDPs/mqtt.dot",
"chars": 28281,
"preview": "digraph mqtt {\n39 [label=\"c1_ConnAck__c2_Empty\"];\n40 [label=\"c1_ConnectionClosed_eof_stream__c2_SubAck\"];\n9 [label=\"c2_U"
},
{
"path": "DotModels/MDPs/second_grid.dot",
"chars": 15182,
"preview": "digraph second_grid {\n21 [label=\"wall\"];\n52 [label=\"sand\"];\n7 [label=\"sand\"];\n13 [label=\"wall\"];\n44 [label=\"grass\"];\n16 "
},
{
"path": "DotModels/MDPs/shared_coin.dot",
"chars": 31245,
"preview": "digraph shared_coin {\n134 [label=\"agree__six__c1_heads__c2_heads\"];\n259 [label=\"ten__c1_tails__c2_heads\"];\n222 [label=\"a"
},
{
"path": "DotModels/MDPs/slot_machine.dot",
"chars": 57686,
"preview": "digraph slot_machine {\n263 [label=\"Pr0\"];\n308 [label=\"end\"];\n163 [label=\"r012\"];\n131 [label=\"Pr0\"];\n143 [label=\"end\"];\n1"
},
{
"path": "DotModels/MDPs/tcp.dot",
"chars": 90177,
"preview": "digraph tcp {\n8 [label=\"ACK_p_NEXT_c_CURRENT_c_0_p\"];\n25 [label=\"ACK_plus_RST_p_ZERO_c_NEXT_c_0_p\"];\n107 [label=\"ACK_p_N"
},
{
"path": "DotModels/MQTT/ActiveMQ__two_client_will_retain.dot",
"chars": 11993,
"preview": "digraph g {\n__start0 [label=\"\" shape=\"none\"];\n\n\ts0 [shape=\"circle\" label=\"s0\"];\n\ts1 [shape=\"circle\" label=\"s1\"];\n\ts2 [sh"
},
{
"path": "DotModels/MQTT/VerneMQ__two_client_will_retain.dot",
"chars": 11230,
"preview": "digraph g {\n__start0 [label=\"\" shape=\"none\"];\n\n\ts0 [shape=\"circle\" label=\"s0\"];\n\ts1 [shape=\"circle\" label=\"s1\"];\n\ts2 [sh"
},
{
"path": "DotModels/MQTT/emqtt__two_client_will_retain.dot",
"chars": 11985,
"preview": "digraph g {\n__start0 [label=\"\" shape=\"none\"];\n\n\ts0 [shape=\"circle\" label=\"s0\"];\n\ts1 [shape=\"circle\" label=\"s1\"];\n\ts2 [sh"
},
{
"path": "DotModels/MQTT/hbmqtt__two_client_will_retain.dot",
"chars": 10803,
"preview": "digraph g {\n__start0 [label=\"\" shape=\"none\"];\n\n\ts0 [shape=\"circle\" label=\"s0\"];\n\ts1 [shape=\"circle\" label=\"s1\"];\n\ts2 [sh"
},
{
"path": "DotModels/MQTT/mosquitto__two_client_will_retain.dot",
"chars": 12047,
"preview": "digraph g {\n__start0 [label=\"\" shape=\"none\"];\n\n\ts0 [shape=\"circle\" label=\"s0\"];\n\ts1 [shape=\"circle\" label=\"s1\"];\n\ts2 [sh"
},
{
"path": "DotModels/SimpleABC/simple_abc_dfa.dot",
"chars": 271,
"preview": "digraph simpleABCDfa {\ns0 [label=\"s0\"];\ns1 [label=\"s1\", shape=doublecircle];\ns0 -> s1 [label=\"a\"];\ns0 -> s0 [label=\"b\"];"
},
{
"path": "DotModels/SimpleABC/simple_abc_mealy.dot",
"chars": 175,
"preview": "digraph simpleABCmealy{\ns0 [label=\"s0\"];\ns0 -> s0 [label=\"a/1\"];\ns0 -> s0 [label=\"b/2\"];\ns0 -> s0 [label=\"c/3\"];\n__start"
},
{
"path": "DotModels/SimpleABC/simple_abc_moore.dot",
"chars": 543,
"preview": "digraph simpleABCmoore{\ns0 [label=\"s0|0\", shape=record, style=rounded];\ns1 [label=\"s1|1\", shape=record, style=rounded];\n"
},
{
"path": "DotModels/TCP/TCP_Linux_Client.dot",
"chars": 7057,
"preview": "digraph \"TrainingDataAndAutomata/TCP_Linux_Client1.dot\" {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2];\ns3 [label=s3];\ns4"
},
{
"path": "DotModels/TCP/tcp_server_bsd_trans.dot",
"chars": 32613,
"preview": "digraph G {\n__start0 [label=\"\" shape=\"none\"];\ns0 [shape=\"circle\" label=\"s0\"];\ns0 -> s0[label=\"CLOSECONNECTION/TIMEOUT\"];"
},
{
"path": "DotModels/TCP/tcp_server_ubuntu_trans.dot",
"chars": 31286,
"preview": "digraph G {\n__start0 [label=\"\" shape=\"none\"];\ns0 [shape=\"circle\" label=\"s0\"];\ns0 -> s0[label=\"CLOSECONNECTION/TIMEOUT\"];"
},
{
"path": "DotModels/TCP/tcp_server_windows_trans.dot",
"chars": 22069,
"preview": "digraph G {\n__start0 [label=\"\" shape=\"none\"];\ns0 [shape=\"circle\" label=\"s0\"];\ns0 -> s0[label=\"CLOSECONNECTION/TIMEOUT\"];"
},
{
"path": "DotModels/TLS/JSSE_1.8.0_25_server_regular.dot",
"chars": 3811,
"preview": "digraph g {\n__start0 [shape=\"none\", label=\"\"];\ns0 [shape=\"circle\", label=\"0\"];\ns1 [shape=\"circle\", label=\"1\"];\ns2 [shape"
},
{
"path": "DotModels/TLS/NSS_3.17.4_server_regular.dot",
"chars": 4382,
"preview": "digraph {\n0 [label=\"s0\"]\n1 [label=\"s1\"]\n2 [label=\"s2\"]\n3 [label=\"s3\"]\n4 [label=\"s4\"]\n5 [label=\"s5\"]\n6 [label=\"s6\"]\n7 [la"
},
{
"path": "DotModels/TLS/OpenSSL_1.0.2_server_regular.dot",
"chars": 3290,
"preview": "digraph {\n6 [label=\"s6\"]\n0 [label=\"s0\"]\n1 [label=\"s1\"]\n2 [label=\"s2\"]\n3 [label=\"s3\"]\n4 [label=\"s4\"]\n5 [label=\"s5\"]\n6 -> "
},
{
"path": "DotModels/TLS/RSA_BSAFE_C_4.0.4_server_regular.dot",
"chars": 4461,
"preview": "digraph {\n6 [label=\"s6\"]\n0 [label=\"s0\"]\n1 [label=\"s1\"]\n2 [label=\"s2\"]\n3 [label=\"s3\"]\n4 [label=\"s4\"]\n5 [label=\"s5\"]\n7 [la"
},
{
"path": "DotModels/TLS/miTLS_0.1.3_server_regular.dot",
"chars": 3437,
"preview": "digraph {\n2 [label=\"s2\"]\n0 [label=\"s0\"]\n1 [label=\"s1\"]\n3 [label=\"s3\"]\n4 [label=\"s4\"]\n5 [label=\"s5\"]\n2 -> 5 [label=\"Appli"
},
{
"path": "DotModels/arithmetics.dot",
"chars": 264,
"preview": "digraph learnedModel {\ns0 [label=\"s0\", shape=circle];\ns1 [label=\"s1\", shape=doublecircle];\ns0 -> s1 [label=\"1\"];\ns0 -> "
},
{
"path": "DotModels/car_alarm.dot",
"chars": 883,
"preview": "digraph car_alarm {\nq1_locked_closed [label=\"N\"];\nq2_locked_open [label=\"A\"];\nq3_locked_closed [label=\"A\"];\nq5_unlocked_"
},
{
"path": "DotModels/coffee_mealy.dot",
"chars": 254,
"preview": "digraph coffee_mealy {\ns0 [label=\"s0\"];\ns1 [label=\"s1\"];\ns0 -> s1 [label=\"coin/ beep\"];\ns0 -> s0 [label=\"button/ init\""
},
{
"path": "DotModels/coffee_moore.dot",
"chars": 457,
"preview": "digraph g {\n __start0 [label=\"\" shape=\"none\"];\n __start0 -> A;\n \tA [shape=\"record\", style=\"rounded\", la"
},
{
"path": "DotModels/five_clients_mqtt_abstracted_onfsm.dot",
"chars": 597664,
"preview": "digraph g {\n\n\ts0 [shape=\"circle\" label=\"0\"];\n\ts1 [shape=\"circle\" label=\"1\"];\n\ts2 [shape=\"circle\" label=\"2\"];\n\ts3 [shape="
},
{
"path": "DotModels/mooreModel.dot",
"chars": 1086,
"preview": "digraph g {\n __start0 [label=\"\" shape=\"none\"];\n __start0 -> A;\n\n \tA [shape=\"record\", style=\"rounded\", l"
},
{
"path": "DotModels/onfsm_0.dot",
"chars": 241,
"preview": "digraph g {\n__start0 [label=\"\" shape=\"none\"];\nq0 [shape=\"circle\" margin=0 label=\"q0\"];\nq1 [shape=\"circle\" margin=0 label"
},
{
"path": "DotModels/onfsm_1.dot",
"chars": 378,
"preview": "digraph g {\n__start0 [label=\"\" shape=\"none\"];\nq0 [shape=\"circle\" margin=0 label=\"q0\"];\nq2 [shape=\"circle\" margin=0 label"
},
{
"path": "DotModels/onfsm_2.dot",
"chars": 354,
"preview": "digraph g {\n__start0 [label=\"\" shape=\"none\"];\nq1 [shape=\"circle\" margin=0 label=\"q1\"];\nq0 [shape=\"circle\" margin=0 label"
},
{
"path": "DotModels/onfsm_3.dot",
"chars": 864,
"preview": "digraph g {\n__start0 [label=\"\" shape=\"none\"];\nq5 [shape=\"circle\" margin=0 label=\"q5\"];\nq2 [shape=\"circle\" margin=0 label"
},
{
"path": "DotModels/onfsm_4.dot",
"chars": 297,
"preview": "digraph Angluin_Mealy {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2];\ns3 [label=s3];\ns0 -> s1 [label=\"a/x\"];\ns0 -> s2 ["
},
{
"path": "DotModels/onfsm_5.dot",
"chars": 436,
"preview": "digraph Angluin_Mealy {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2];\ns3 [label=s3];\ns4 [label=s4];\ns0 -> s1 [label=\"a/X"
},
{
"path": "DotModels/tomitaGrammars/tomita_1.dot",
"chars": 218,
"preview": "digraph \"tomita_1\" {\ns0 [label=s0, shape=doublecircle];\ns1 [label=s1];\ns0 -> s1 [label=0];\ns0 -> s0 [label=1];\ns1 -> s"
},
{
"path": "DotModels/tomitaGrammars/tomita_2.dot",
"chars": 332,
"preview": "digraph \"tomita_2\" {\ns0 [label=s0];\ns1 [label=s1];\ns2 [label=s2, shape=doublecircle];\ns3 [label=s3];\ns0 -> s3 [label=0]"
},
{
"path": "DotModels/tomitaGrammars/tomita_3.dot",
"chars": 429,
"preview": "digraph \"tomita_3\" {\ns0 [label=s0, shape=doublecircle];\ns1 [label=s1, shape=doublecircle];\ns2 [label=s2];\ns3 [label=s3];"
},
{
"path": "DotModels/tomitaGrammars/tomita_4.dot",
"chars": 372,
"preview": "digraph \"tomita_4\" {\ns0 [label=s0, shape=doublecircle];\ns1 [label=s1, shape=doublecircle];\ns2 [label=s2, shape=doublecir"
},
{
"path": "DotModels/tomitaGrammars/tomita_5.dot",
"chars": 332,
"preview": "digraph \"tomita_5\" {\ns0 [label=s0, shape=doublecircle];\ns1 [label=s1];\ns2 [label=s2];\ns3 [label=s3];\ns0 -> s1 [label=0]"
},
{
"path": "DotModels/tomitaGrammars/tomita_6.dot",
"chars": 275,
"preview": "digraph \"tomita_6\" {\ns0 [label=s0, shape=doublecircle];\ns1 [label=s1];\ns2 [label=s2];\ns0 -> s1 [label=0];\ns0 -> s2 [la"
},
{
"path": "DotModels/tomitaGrammars/tomita_7.dot",
"chars": 449,
"preview": "digraph \"tomita_7\" {\ns0 [label=s0, shape=doublecircle];\ns1 [label=s1, shape=doublecircle];\ns2 [label=s2, shape=doublecir"
},
{
"path": "Examples.py",
"chars": 53004,
"preview": "def random_deterministic_model_example():\n from aalpy.utils import generate_random_deterministic_automata\n from aa"
},
{
"path": "LICENCE.txt",
"chars": 1188,
"preview": "MIT License\n\nCopyright (c) 2026 Edi Muskardin\n TU Graz - SAL Dependable Embedded Systems Lab (DES La"
},
{
"path": "README.md",
"chars": 8576,
"preview": "<div align=\"center\">\n\n<picture style=\"align: center; padding-bottom: 8mm;\">\n <source media=\"(prefers-color-scheme: dark"
},
{
"path": "aalpy/SULs/AutomataSUL.py",
"chars": 485,
"preview": "from aalpy.base import Automaton\nfrom aalpy.base import SUL\n\n\nclass AutomatonSUL(SUL):\n def __init__(self, automaton:"
},
{
"path": "aalpy/SULs/PyMethodSUL.py",
"chars": 1677,
"preview": "from aalpy.base import SUL\n\n\nclass FunctionDecorator:\n \"\"\"\n Decorator of methods found in the SUL class.\n \"\"\"\n\n"
},
{
"path": "aalpy/SULs/RegexSUL.py",
"chars": 906,
"preview": "from aalpy.base import SUL\nimport re\n\n\nclass RegexSUL(SUL):\n \"\"\"\n An example implementation of a system under lear"
},
{
"path": "aalpy/SULs/TomitaSUL.py",
"chars": 1478,
"preview": "import re\n\nfrom aalpy.base import SUL\n\n\nclass TomitaSUL(SUL):\n \"\"\"\n Tomita grammars are often used as a benchmark "
},
{
"path": "aalpy/SULs/__init__.py",
"chars": 146,
"preview": "from .AutomataSUL import *\nfrom .PyMethodSUL import FunctionDecorator, PyClassSUL\nfrom .RegexSUL import RegexSUL\nfrom .T"
},
{
"path": "aalpy/__init__.py",
"chars": 2080,
"preview": "from .automata import (\n Dfa,\n DfaState,\n MarkovChain,\n McState,\n Mdp,\n MdpState,\n MealyMachine,\n "
},
{
"path": "aalpy/automata/Dfa.py",
"chars": 3986,
"preview": "from typing import Generic, Dict\n\nfrom aalpy.base import AutomatonState, DeterministicAutomaton\nfrom aalpy.base.Automato"
},
{
"path": "aalpy/automata/MarkovChain.py",
"chars": 2019,
"preview": "import random\nfrom typing import Generic, Tuple, List\n\nfrom aalpy.base import Automaton, AutomatonState\nfrom aalpy.base."
},
{
"path": "aalpy/automata/Mdp.py",
"chars": 3109,
"preview": "import random\nfrom collections import defaultdict\nfrom typing import Dict, Generic, List, Tuple\n\nfrom aalpy.base import "
},
{
"path": "aalpy/automata/MealyMachine.py",
"chars": 3239,
"preview": "from typing import Generic, Dict\n\nfrom aalpy.base import AutomatonState, DeterministicAutomaton\nfrom aalpy.base.Automato"
},
{
"path": "aalpy/automata/MooreMachine.py",
"chars": 3861,
"preview": "from typing import Generic, Dict\n\nfrom aalpy.base import AutomatonState, DeterministicAutomaton\nfrom aalpy.base.Automato"
},
{
"path": "aalpy/automata/NonDeterministicMooreMachine.py",
"chars": 2332,
"preview": "import random\nfrom collections import defaultdict\nfrom typing import List, Dict, Generic\n\nfrom aalpy.base import Automat"
},
{
"path": "aalpy/automata/Onfsm.py",
"chars": 2929,
"preview": "from collections import defaultdict\nfrom random import choice\nfrom typing import Generic, Tuple, Dict, List\n\nfrom aalpy."
},
{
"path": "aalpy/automata/Sevpa.py",
"chars": 23607,
"preview": "import random\nfrom collections import defaultdict, deque\nfrom typing import Union, List, Dict\n\nfrom aalpy.base import Au"
},
{
"path": "aalpy/automata/StochasticMealyMachine.py",
"chars": 4899,
"preview": "import random\nfrom collections import defaultdict\nfrom typing import Generic, Tuple, List, Dict\n\nfrom aalpy.automata imp"
},
{
"path": "aalpy/automata/Vpa.py",
"chars": 12619,
"preview": "import random\nfrom collections import defaultdict\nfrom typing import List, Dict\n\nfrom aalpy.base import Automaton, Autom"
},
{
"path": "aalpy/automata/__init__.py",
"chars": 527,
"preview": "from .Dfa import Dfa, DfaState\nfrom .Mdp import Mdp, MdpState\nfrom .MealyMachine import MealyMachine, MealyState\nfrom .M"
},
{
"path": "aalpy/base/Automaton.py",
"chars": 17124,
"preview": "import copy\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom typing import U"
},
{
"path": "aalpy/base/CacheTree.py",
"chars": 5567,
"preview": "class Node(object):\n __slots__ = ['value', 'children']\n\n def __init__(self, value=None):\n self.value = valu"
},
{
"path": "aalpy/base/Oracle.py",
"chars": 1154,
"preview": "from abc import ABC, abstractmethod\n\nfrom aalpy.base import SUL\n\n\nclass Oracle(ABC):\n \"\"\"Abstract class implemented b"
},
{
"path": "aalpy/base/SUL.py",
"chars": 5625,
"preview": "from abc import ABC, abstractmethod\n\nfrom aalpy.base.CacheTree import CacheTree, CacheDict\n\n\nclass SUL(ABC):\n \"\"\"\n "
},
{
"path": "aalpy/base/__init__.py",
"chars": 121,
"preview": "from .Automaton import Automaton, AutomatonState, DeterministicAutomaton\nfrom .Oracle import Oracle\nfrom .SUL import SUL"
},
{
"path": "aalpy/learning_algs/__init__.py",
"chars": 874,
"preview": "# public API for running automata learning algorithms\nfrom .deterministic.LStar import run_Lstar\nfrom .deterministic.KV "
},
{
"path": "aalpy/learning_algs/adaptive/AdaptiveLSharp.py",
"chars": 6379,
"preview": "import time\n\nfrom aalpy.base import Oracle, SUL\nfrom aalpy.utils.HelperFunctions import print_learning_info\nfrom .Adapti"
},
{
"path": "aalpy/learning_algs/adaptive/AdaptiveObservationTree.py",
"chars": 23573,
"preview": "from aalpy.automata import MealyMachine, MealyState\nfrom aalpy.learning_algs.adaptive.StateMatching import TotalStateMat"
},
{
"path": "aalpy/learning_algs/adaptive/StateMatching.py",
"chars": 17325,
"preview": "from abc import abstractmethod\n\n\nclass StateMatching:\n def __init__(self, alphabet, combined_model):\n \"\"\"\n "
},
{
"path": "aalpy/learning_algs/adaptive/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "aalpy/learning_algs/deterministic/ADS.py",
"chars": 7234,
"preview": "from collections import defaultdict\n\n\nclass AdsNode:\n __slots__ = ['input', 'children', 'score']\n\n def __init__(se"
},
{
"path": "aalpy/learning_algs/deterministic/Apartness.py",
"chars": 5292,
"preview": "from collections import deque\n\n\nclass Apartness:\n @staticmethod\n def compute_witness(state1, state2, ob_tree):\n "
},
{
"path": "aalpy/learning_algs/deterministic/ClassificationTree.py",
"chars": 20713,
"preview": "from collections import defaultdict\nfrom itertools import product\nfrom typing import Union\n\nfrom aalpy.automata import D"
},
{
"path": "aalpy/learning_algs/deterministic/CounterExampleProcessing.py",
"chars": 6814,
"preview": "from aalpy.base import SUL\nfrom aalpy.utils.HelperFunctions import all_suffixes, all_prefixes\n\n\ndef counterexample_succe"
},
{
"path": "aalpy/learning_algs/deterministic/KV.py",
"chars": 6596,
"preview": "import time\nfrom typing import Union\n\nfrom aalpy.automata import Dfa, DfaState, MealyState, MealyMachine, MooreState, Mo"
},
{
"path": "aalpy/learning_algs/deterministic/LSharp.py",
"chars": 4224,
"preview": "import time\n\nfrom aalpy.base import Oracle, SUL\nfrom aalpy.utils.HelperFunctions import print_learning_info\nfrom .Observ"
},
{
"path": "aalpy/learning_algs/deterministic/LStar.py",
"chars": 8144,
"preview": "import time\n\nfrom aalpy.base import Oracle, SUL\nfrom aalpy.utils.HelperFunctions import extend_set, print_learning_info,"
},
{
"path": "aalpy/learning_algs/deterministic/ObservationTable.py",
"chars": 8099,
"preview": "from collections import defaultdict\n\nfrom aalpy.base import Automaton, SUL\nfrom aalpy.automata import Dfa, DfaState, Mea"
},
{
"path": "aalpy/learning_algs/deterministic/ObservationTree.py",
"chars": 24787,
"preview": "from .ADS import Ads\nfrom .Apartness import Apartness\nfrom ... import Dfa, DfaState, MealyState, MealyMachine, MooreMach"
},
{
"path": "aalpy/learning_algs/deterministic/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "aalpy/learning_algs/deterministic_passive/ClassicRPNI.py",
"chars": 3917,
"preview": "import time\nfrom bisect import insort\nfrom aalpy.learning_algs.deterministic_passive.rpni_helper_functions import to_aut"
},
{
"path": "aalpy/learning_algs/deterministic_passive/GsmRPNI.py",
"chars": 4001,
"preview": "import time\nfrom collections import deque\n\nfrom aalpy.learning_algs.deterministic_passive.rpni_helper_functions import t"
},
{
"path": "aalpy/learning_algs/deterministic_passive/PAPNI.py",
"chars": 2623,
"preview": "from aalpy.utils import is_balanced\nfrom aalpy.automata.Vpa import vpa_from_dfa_representation\n\ndef run_PAPNI(data, vpa_"
},
{
"path": "aalpy/learning_algs/deterministic_passive/RPNI.py",
"chars": 2626,
"preview": "from typing import Union\n\nfrom aalpy.base import DeterministicAutomaton\nfrom aalpy.learning_algs.deterministic_passive.C"
},
{
"path": "aalpy/learning_algs/deterministic_passive/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "aalpy/learning_algs/deterministic_passive/active_RPNI.py",
"chars": 1818,
"preview": "from abc import ABC, abstractmethod\nfrom random import randint, choice\n\nfrom aalpy.learning_algs import run_RPNI\nfrom aa"
},
{
"path": "aalpy/learning_algs/deterministic_passive/rpni_helper_functions.py",
"chars": 6868,
"preview": "import pickle\nfrom functools import total_ordering\n\n\n@total_ordering\nclass RpniNode:\n __slots__ = ['output', 'childre"
},
{
"path": "aalpy/learning_algs/general_passive/GeneralizedStateMerging.py",
"chars": 17290,
"preview": "import functools\nfrom collections import deque\nfrom typing import Dict, Tuple, Callable, List, Optional\n\nfrom aalpy.lear"
},
{
"path": "aalpy/learning_algs/general_passive/GsmAlgorithms.py",
"chars": 6951,
"preview": "from typing import Dict, Union\n\nfrom aalpy import DeterministicAutomaton, Onfsm, NDMooreMachine\nfrom aalpy.learning_algs"
},
{
"path": "aalpy/learning_algs/general_passive/GsmNode.py",
"chars": 20920,
"preview": "import functools\nimport math\nimport pathlib\nfrom collections import defaultdict\nfrom functools import total_ordering\nfro"
},
{
"path": "aalpy/learning_algs/general_passive/Instrumentation.py",
"chars": 4946,
"preview": "from time import perf_counter\nfrom typing import Dict, Optional\n\nfrom aalpy.learning_algs.general_passive.GeneralizedSta"
},
{
"path": "aalpy/learning_algs/general_passive/ScoreFunctionsGSM.py",
"chars": 9126,
"preview": "from math import sqrt, log\nfrom typing import Callable, Dict, List, Iterable, Any\n\nfrom aalpy.learning_algs.general_pass"
},
{
"path": "aalpy/learning_algs/general_passive/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "aalpy/learning_algs/non_deterministic/AbstractedOnfsmLstar.py",
"chars": 6280,
"preview": "import time\n\nfrom aalpy.base import SUL, Oracle\nfrom aalpy.learning_algs.non_deterministic.AbstractedOnfsmObservationTab"
},
{
"path": "aalpy/learning_algs/non_deterministic/AbstractedOnfsmObservationTable.py",
"chars": 15505,
"preview": "from collections import defaultdict\n\nfrom aalpy.automata import Onfsm, OnfsmState\nfrom aalpy.learning_algs.non_determini"
},
{
"path": "aalpy/learning_algs/non_deterministic/NonDeterministicSULWrapper.py",
"chars": 634,
"preview": "from aalpy.base import SUL\nfrom aalpy.learning_algs.non_deterministic.TraceTree import TraceTree\n\n\nclass NonDeterministi"
},
{
"path": "aalpy/learning_algs/non_deterministic/OnfsmLstar.py",
"chars": 4870,
"preview": "import time\n\nfrom aalpy.base import SUL, Oracle\nfrom aalpy.learning_algs.non_deterministic.NonDeterministicSULWrapper im"
},
{
"path": "aalpy/learning_algs/non_deterministic/OnfsmObservationTable.py",
"chars": 6828,
"preview": "from collections import Counter\n\nfrom aalpy.automata import Onfsm, OnfsmState, StochasticMealyState, StochasticMealyMach"
},
{
"path": "aalpy/learning_algs/non_deterministic/TraceTree.py",
"chars": 5808,
"preview": "from collections import defaultdict\n\n\nclass Node:\n __slots__ = ['output', 'children', 'parent', 'frequency_counter']\n"
},
{
"path": "aalpy/learning_algs/non_deterministic/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "aalpy/learning_algs/stochastic/DifferenceChecker.py",
"chars": 7587,
"preview": "from abc import ABC, abstractmethod\nfrom math import sqrt, log\n\nchi2_table = dict()\n\nchi2_table[0.95] = \\\n dict([(1, "
},
{
"path": "aalpy/learning_algs/stochastic/SamplingBasedObservationTable.py",
"chars": 24894,
"preview": "from collections import defaultdict\n\nfrom aalpy.automata import Mdp, MdpState, StochasticMealyState, StochasticMealyMach"
},
{
"path": "aalpy/learning_algs/stochastic/StochasticCexProcessing.py",
"chars": 3295,
"preview": "from aalpy.automata import Mdp\nfrom aalpy.base import SUL\n\n\ndef stochastic_longest_prefix(cex, prefixes):\n \"\"\"\n Co"
},
{
"path": "aalpy/learning_algs/stochastic/StochasticLStar.py",
"chars": 9953,
"preview": "import time\n\nfrom aalpy.base import SUL, Oracle\nfrom aalpy.learning_algs.stochastic.DifferenceChecker import AdvancedHoe"
},
{
"path": "aalpy/learning_algs/stochastic/StochasticTeacher.py",
"chars": 12846,
"preview": "from collections import defaultdict\nfrom random import choice, random\n\nfrom aalpy.base import SUL\nfrom aalpy.learning_al"
},
{
"path": "aalpy/learning_algs/stochastic/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "aalpy/learning_algs/stochastic_passive/ActiveAleriga.py",
"chars": 2804,
"preview": "from abc import ABC, abstractmethod\nfrom random import randint, choice\n\nfrom aalpy.learning_algs import run_Alergia\n\n\ncl"
},
{
"path": "aalpy/learning_algs/stochastic_passive/Alergia.py",
"chars": 9869,
"preview": "import time\nfrom bisect import insort\n\nfrom aalpy.automata import MarkovChain, MdpState, Mdp, McState, StochasticMealySt"
},
{
"path": "aalpy/learning_algs/stochastic_passive/CompatibilityChecker.py",
"chars": 1707,
"preview": "from abc import ABC, abstractmethod\nfrom math import sqrt, log\n\nfrom aalpy.learning_algs.stochastic_passive.FPTA import "
},
{
"path": "aalpy/learning_algs/stochastic_passive/FPTA.py",
"chars": 3065,
"preview": "from functools import total_ordering\n\n\n@total_ordering\nclass AlergiaPtaNode:\n __slots__ = ['prefix', 'output', 'input"
},
{
"path": "aalpy/learning_algs/stochastic_passive/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "aalpy/oracles/BreadthFirstExplorationEqOracle.py",
"chars": 1384,
"preview": "from aalpy.base.Oracle import Oracle\nfrom aalpy.base.SUL import SUL\n\nfrom itertools import product\nfrom random import sh"
},
{
"path": "aalpy/oracles/CacheBasedEqOracle.py",
"chars": 3109,
"preview": "from aalpy.base import Oracle, SUL\nfrom aalpy.base.SUL import CacheSUL\n\nfrom random import choice\n\n\nclass CacheBasedEqOr"
},
{
"path": "aalpy/oracles/KWayStateCoverageEqOracle.py",
"chars": 3446,
"preview": "from random import choices, shuffle\n\nfrom aalpy.base import Oracle, SUL\nfrom itertools import combinations, permutations"
},
{
"path": "aalpy/oracles/KWayTransitionCoverageEqOracle.py",
"chars": 8606,
"preview": "from collections import namedtuple\nfrom itertools import product\nfrom random import choices, randint, random\n\nfrom aalpy"
},
{
"path": "aalpy/oracles/PacOracle.py",
"chars": 1695,
"preview": "from math import ceil, log\nfrom random import choice, randint\n\nfrom aalpy.base import Oracle, SUL\n\n\nclass PacOracle(Orac"
},
{
"path": "aalpy/oracles/PerfectKnowledgeEqOracle.py",
"chars": 607,
"preview": "from aalpy.base import Oracle, SUL, DeterministicAutomaton\nfrom aalpy.utils import bisimilar\n\n\nclass PerfectKnowledgeEqO"
},
{
"path": "aalpy/oracles/ProvidedSequencesOracleWrapper.py",
"chars": 1695,
"preview": "from aalpy.base import Oracle, SUL\n\n\nclass ProvidedSequencesOracleWrapper(Oracle):\n def __init__(self, alphabet: list"
},
{
"path": "aalpy/oracles/RandomWalkEqOracle.py",
"chars": 2962,
"preview": "import random\n\nfrom aalpy.automata import Onfsm, Mdp, StochasticMealyMachine\nfrom aalpy.base import Oracle, SUL\n\nautomat"
},
{
"path": "aalpy/oracles/RandomWordEqOracle.py",
"chars": 3447,
"preview": "from statistics import mean\n\nfrom aalpy.automata import Onfsm, Mdp, StochasticMealyMachine\nfrom aalpy.base import Oracle"
},
{
"path": "aalpy/oracles/StatePrefixEqOracle.py",
"chars": 3057,
"preview": "import random\n\nfrom aalpy.base.Oracle import Oracle\nfrom aalpy.base.SUL import SUL\n\n\nclass StatePrefixEqOracle(Oracle):\n"
},
{
"path": "aalpy/oracles/TransitionFocusOracle.py",
"chars": 2096,
"preview": "import random\n\nfrom aalpy.base.Oracle import Oracle\nfrom aalpy.base.SUL import SUL\n\n\nclass TransitionFocusOracle(Oracle)"
},
{
"path": "aalpy/oracles/UserInputEqOracle.py",
"chars": 2631,
"preview": "from aalpy.base import Oracle, SUL\nfrom aalpy.utils.FileHandler import visualize_automaton\n\n\nclass UserInputEqOracle(Ora"
},
{
"path": "aalpy/oracles/WMethodEqOracle.py",
"chars": 4929,
"preview": "from random import shuffle, choice, randint\n\nfrom aalpy.base.Oracle import Oracle\nfrom aalpy.base.SUL import SUL\nfrom it"
},
{
"path": "aalpy/oracles/WpMethodEqOracle.py",
"chars": 6368,
"preview": "import random\nfrom aalpy.base.Oracle import Oracle\nfrom aalpy.base.SUL import SUL\nfrom itertools import chain, product\n\n"
},
{
"path": "aalpy/oracles/__init__.py",
"chars": 839,
"preview": "from .BreadthFirstExplorationEqOracle import BreadthFirstExplorationEqOracle\nfrom .CacheBasedEqOracle import CacheBasedE"
},
{
"path": "aalpy/paths.py",
"chars": 448,
"preview": "\"\"\"\nFile in which necessary paths for model checking are defined.\n\npath_to_prism is the absolute or relative path to the"
},
{
"path": "aalpy/utils/AutomatonGenerators.py",
"chars": 21736,
"preview": "import random\nimport warnings\n\nfrom aalpy.automata import Dfa, DfaState, MdpState, Mdp, MealyMachine, MealyState, \\\n "
},
{
"path": "aalpy/utils/BenchmarkSULs.py",
"chars": 12910,
"preview": "def get_Angluin_dfa():\n from aalpy.automata import Dfa\n\n angluin_dfa = {\n 'q0': (True, {'a': 'q1', 'b': 'q2"
},
{
"path": "aalpy/utils/BenchmarkSevpaModels.py",
"chars": 10773,
"preview": "from aalpy.automata.Sevpa import Sevpa\nfrom aalpy.utils import load_automaton_from_file\n\n\ndef sevpa_for_L1():\n state_"
},
{
"path": "aalpy/utils/BenchmarkVpaModels.py",
"chars": 15926,
"preview": "import random\n\nfrom aalpy.automata.Vpa import Vpa, VpaAlphabet\n\n\ndef vpa_L1():\n # we always ensure that n >= 1\n\n c"
},
{
"path": "aalpy/utils/DataHandler.py",
"chars": 2153,
"preview": "from abc import ABC, abstractmethod\n\n\nclass DataHandler(ABC):\n \"\"\"\n Abstract class used for data loading for Alerg"
},
{
"path": "aalpy/utils/FileHandler.py",
"chars": 19016,
"preview": "import re\nimport sys\nimport traceback\nfrom pathlib import Path\n\nfrom pydot import Dot, Node, Edge\n\nfrom aalpy.automata i"
},
{
"path": "aalpy/utils/HelperFunctions.py",
"chars": 16350,
"preview": "import random\nimport string\nfrom itertools import product\nfrom collections import defaultdict\n\nfrom aalpy import Mdp, Ma"
},
{
"path": "aalpy/utils/ModelChecking.py",
"chars": 16073,
"preview": "import itertools as it\nimport os\nimport re\nfrom collections import defaultdict\nfrom queue import Queue\nfrom random impor"
},
{
"path": "aalpy/utils/Sampling.py",
"chars": 3323,
"preview": "from functools import wraps\nfrom random import randint, choices, random\n\nfrom aalpy import MooreMachine, Dfa, NDMooreMac"
},
{
"path": "aalpy/utils/__init__.py",
"chars": 1066,
"preview": "from .AutomatonGenerators import (\n generate_random_dfa,\n generate_random_mealy_machine,\n generate_random_smm,\n"
},
{
"path": "docs/README.md",
"chars": 3431,
"preview": "AALpy is a light-weight automata learning library written in Python. \nYou can start learning models of black-box systems"
},
{
"path": "docs/_config.yml",
"chars": 217,
"preview": "title : AALpy\nauthor: Edi Muskardin\ndescription: An Active Automata Learning Library\nrss-description: An Active Automata"
},
{
"path": "docs/google306875680a34d740.html",
"chars": 53,
"preview": "google-site-verification: google306875680a34d740.html"
},
{
"path": "docs/instructions.txt",
"chars": 224,
"preview": "1. For commenting use Google style with one additional space for between Args: and arguments, and one space between\n "
}
]
// ... and 25 more files (download for full content)
About this extraction
This page contains the full source code of the DES-Lab/AALpy GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 225 files (29.8 MB), approximately 1.4M tokens, and a symbol index with 996 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — a free GitHub-repo-to-text converter for AI. Built by Nikandr Surkov.