[
  {
    "path": ".gitignore",
    "content": "### https://raw.github.com/github/gitignore/1a92ca87b787b6a0c68eb10371ebd0bfd823db9b/Global/Windows.gitignore\n\n# Windows image file caches\nThumbs.db\nehthumbs.db\n\n# Folder config file\nDesktop.ini\n\n# Recycle Bin used on file shares\n$RECYCLE.BIN/\n\n# Windows Installer files\n*.cab\n*.msi\n*.msm\n*.msp\n\n# Windows shortcuts\n*.lnk\n\n\n### https://raw.github.com/github/gitignore/1a92ca87b787b6a0c68eb10371ebd0bfd823db9b/Global/Linux.gitignore\n\n*~\n\n# temporary files which can be created if a process still has a handle open of a deleted file\n.fuse_hidden*\n\n# KDE directory preferences\n.directory\n\n# Linux trash folder which might appear on any partition or disk\n.Trash-*\n\n# .nfs files are created when an open file is removed but is still being accessed\n.nfs*\n\n\n### https://raw.github.com/github/gitignore/1a92ca87b787b6a0c68eb10371ebd0bfd823db9b/Global/macOS.gitignore\n\n*.DS_Store\r\n.AppleDouble\r\n.LSOverride\r\n\r\n# Icon must end with two \\r\r\nIcon\r\n\r\n\r\n# Thumbnails\r\n._*\r\n\r\n# Files that might appear in the root of a volume\r\n.DocumentRevisions-V100\r\n.fseventsd\r\n.Spotlight-V100\r\n.TemporaryItems\r\n.Trashes\r\n.VolumeIcon.icns\r\n.com.apple.timemachine.donotpresent\r\n\r\n# Directories potentially created on remote AFP share\r\n.AppleDB\r\n.AppleDesktop\r\nNetwork Trash Folder\r\nTemporary Items\r\n.apdisk\r\n\n\n### https://raw.github.com/github/gitignore/1a92ca87b787b6a0c68eb10371ebd0bfd823db9b/Global/Emacs.gitignore\n\n# -*- mode: gitignore; -*-\n*~\n\\#*\\#\n/.emacs.desktop\n/.emacs.desktop.lock\n*.elc\nauto-save-list\ntramp\n.\\#*\n\n# Org-mode\n.org-id-locations\n*_archive\n\n# flymake-mode\n*_flymake.*\n\n# eshell files\n/eshell/history\n/eshell/lastdir\n\n# elpa packages\n/elpa/\n\n# reftex files\n*.rel\n\n# AUCTeX auto folder\n/auto/\n\n# cask packages\n.cask/\ndist/\n\n# Flycheck\nflycheck_*.el\n\n# server auth directory\n/server/\n\n# projectiles files\n.projectile\n\n# directory 
configuration\n.dir-locals.el\n\n\n### https://raw.github.com/github/gitignore/1a92ca87b787b6a0c68eb10371ebd0bfd823db9b/Global/Vim.gitignore\n\n# swap\n[._]*.s[a-w][a-z]\n[._]s[a-w][a-z]\n# session\nSession.vim\n# temporary\n.netrwhist\n*~\n# auto-generated tag files\ntags\n\n\n### https://raw.github.com/github/gitignore/1a92ca87b787b6a0c68eb10371ebd0bfd823db9b/Global/Tags.gitignore\n\n# Ignore tags created by etags, ctags, gtags (GNU global) and cscope\nTAGS\n.TAGS\n!TAGS/\ntags\n.tags\n!tags/\ngtags.files\nGTAGS\nGRTAGS\nGPATH\nGSYMS\ncscope.files\ncscope.out\ncscope.in.out\ncscope.po.out\n\n\n\n### https://raw.github.com/github/gitignore/1a92ca87b787b6a0c68eb10371ebd0bfd823db9b/python.gitignore\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nenv/\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\n*.egg-info/\n.installed.cfg\n*.egg\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*,cover\n.hypothesis/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# dotenv\n.env\n\n# virtualenv\n.venv/\nvenv/\nENV/\n\n# Spyder project settings\n.spyderproject\n\n# Rope project settings\n.ropeproject\n\n\n\n\n\n\n### gekkoJaponicus\noutput\nsrc\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM python:3.6.6-jessie\n\n\nENV LANG en_US.UTF-8\n\n# install dependencies;\n#RUN apt-get update -y\n#RUN apt-get install software-properties-common python-software-properties -y\n\n\nRUN apt-get update -y\nRUN apt-get upgrade -y\n\n\nRUN apt-get install python3-pip python3-numpy -y\n\nRUN pip3.6 install --upgrade pip\n\nCOPY ./requirements.txt /opt/japonicus/requirements.txt\n\n# those are required to build other python modules, so install first;\nRUN pip3.6 install numpy cython pandas\n\nRUN pip3.6 install --ignore-installed -r /opt/japonicus/requirements.txt\n\n\nWORKDIR /opt/japonicus/\n\nCOPY . /opt/japonicus\n\nEXPOSE 5000\n\nRUN python3.6 --version\n\nENTRYPOINT [\"python3.6\", \"/opt/japonicus/japonicus-run\"]\nCMD [\"python3.6\", \"/opt/japonicus/japonicus-run\", \"--help\"]\n"
  },
  {
    "path": "LICENSE",
    "content": "The MIT License (MIT)\n\nCopyright (c) 2014-2017 Mike van Rossum mike@mvr.me\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "### What is japonicus and what it does\nThis is an implementation of genetic algorithm & bayesian evolution to develop strategies for digital coin trading bot <a href=\"https://github.com/askmike/gekko\">Gekko</a>.\n\nSo you make a good strat, or get one from the internetz. Make sure its good, because this is not about miracles.\n\nIf you get good profit on strat standard settings or some random settings you made up, japonicus can find some setting set that improves the strategy, on some specific market/currency or overall.\n\nDiscord Group: `https://discord.gg/kYKHXnV`\n\n## Instructions\nJaponicus works on `python>=3.6`!\nCheck wiki for instructions on setup, workflow, methods, etc.\n\n## Disclaimer\n\nNo matter how many years your training candles span or how convoluted or simple is your strategy,\nyour strategies/parameters that were profitable on backtest runs probably won't translate well to live trading.<br>\nAltough japonicus is a fairly competent GA and will find a capable set of parameters in a few epoches,\nwe're yet to discover why the live trading runs seem an environment completely unrelated to backtest env, where\nstrategies/parameters that seemed good offline turn into live daily losses or below market gains.\n\nYou can send me a few coins to help me develop some ideas for binance trading bots,\nas this kind of research involves a steady negative profit so I cannot sustain it indefinitely...\nIf something starts working I will share here or in our discord group. Those are my binance wallets:\n\n```\nLTC\tLVhThMzJMC6aKBcA1KX4q3yk2ryBjfPGfH\nETH\t0xceaa9bb655ed80ba36b55532fdd6e11e6e5b681b\n```\n\n## User Feedback\n\nYou all users of japonicus should report notable runs under an issue.\nIf some strat seems to be viable, send feedback so users can have a better point of entry for their own runs.<br>\n\n## Future\n\nGenetic Algorithms are a good way to fetch a good set of settings to run a strategy on gekko. 
<br>\nBut the real gamechanger is the strategy itself.<br>\nThe ideal evolution method would be a Genetic Programming that modifies strategy logic. <br>\nThis somewhat corresponds to `--skeleton` mode of japonicus, which lets the GA select indicators on a base strategy.\n\n\n# Changelog\n\nThe changelog is important for the user as following modifications can cause bugs on related areas. Please report 'em ;)\n\n```\nv0.92\n- Moving all gekko related functions to evaluation.gekko module. The purpose is making japonicus a general purpose\nGA framework.\n\n\nv0.91\n\n- the evolution candle date ranges are now defined by given area in the map, instead of attached at each locale.\n\n\nv0.90 \n\n- web interface reworked - now it is the recommended method to run the ga's.\n- locale creation/destruction chances updated.\n- bayesian evolution method deprecated.\n\nv0.80 \n\n- supports gekko v0.6.X (only).\n- Dockerfile and docker-compose methods revisited.\n- automatic filter for multiple remote gekko urls (urls defined inside settings/global)\n- live trading bot watcher at `jlivetrader.py`. For binance only, undocumented and experimental.\n\n\nv0.70 \n\n- log folder restructured\n- configStrategies.py DEPRECATED; use only TOML parameters at the folder strategy_parameters.\n    check TOML special syntax for parameter ranges at the wiki\n- GA benchmark mode added\n- Settings.py refactor\n- Roundtrip exposure time filter\n\n\nv0.58\n\n- runs in Windows (not confirmed)\n- Settings parameters can be passed on command line (check the --help)\n- Multiple evolution datasets can be passed. 
`@Settings.py:dataset ->\n  dataset_source is the first, add dataset_source1; dataset_source2 and so forth\nfor multiple datasets.`\n- filter individues for minimum trade count (default: enabled@16 trades)\n- backtest scores (profit and sharpe) to individue final score method is now a sum, not multiplication\n\nv0.56 \n\n- japonicus settings for strategies can be stored at strategy_parameters folder as .toml files\n- automated refactor on entire codebase\n- wiki is online, check it for instructions.\n- various bugfixes\n- log & results improvements\n- daterange for locales now on locale logs (.csv)\n- statistics methods remade.\n\nv0.54\n\n- Variation of Backtest result interpretation. check Settings.py -> genconf.interpreteBacktestProfit\n- Focus on selecting best individues. Periodic evaluation on more candidates. Bugfixes on that department. \n- Result interface actually readable.\n- Log better structured, with the summary at the top.\n- Small clarifications on code.\n\nv0.53\n\n- Major aesthetics rework on code itself; now we can even have collaborators.\n- Pretty run logs @ logs folder;\n- Interchangeable backtest result interpretation (promoterz.evaluation.gekko:25)\n- gekko API is now organized - backtest & dadataset functions separated.\n- Genetic Algorithm settings controllable via command line. Check --help.\n- Web interface more stable\n\nv0.51\n- Started tracking updates on changelog;\n\n```\n"
  },
  {
    "path": "docker-compose.yaml",
    "content": "version: '3'\nservices:\n  gekko:\n    image: gekko\n    volumes:\n      - gekko-dir:/usr/src/app\n    ports:\n      - \"3000:3000\"\n  japonicus:\n    command: $JARGS\n    \n    image: japonicus\n    build: .\n    volumes:\n      - gekko-dir:/root/gekko\n    ports:\n      - \"5000:5000\"\n\n    depends_on:\n      - gekko\n\nvolumes:\n  gekko-dir:\n"
  },
  {
    "path": "evaluation/__init__.py",
    "content": "#!/bin/python\nfrom .import gekko\nfrom . import benchmark\n"
  },
  {
    "path": "evaluation/benchmark/__init__.py",
    "content": "#!/bin/python\n\nfrom . import benchmark\n"
  },
  {
    "path": "evaluation/benchmark/benchmark.py",
    "content": "#!/bin/python\n\n# source https://www.researchgate.net/publication/27382766_On_benchmarking_functions_for_genetic_algorithm\nimport random\nimport math\n\n\ndef evalRosenbrock(parameters):\n    Result = pow(1-parameters[0], 2)\n    Result += 100 * pow(pow(parameters[0], 2) - parameters[1], 2)\n\n    return -Result\n\n\ndef evalGriewangk(parameters):\n    Dimensions = 10\n    Result = 1\n\n    for w in range(Dimensions):\n        W = w + 1\n        Result += pow(parameters[w], 2) / 4000\n\n    COSs = math.cos(parameters[0])\n    for z in range(1, Dimensions):\n        Z = z + 1\n        COSs *= (math.cos(parameters[z]) / math.sqrt(Z))\n\n    Result -= COSs\n\n    return -Result\n\n\ndef evalRastrigin(parameters):\n    Dimensions = 20\n    Result = 10 * Dimensions\n\n    for w in range(Dimensions):\n        W = w + 1\n        Result += pow(parameters[w], 2)\n        Result -= 10 * (math.cos(2*math.pi*parameters[w]))\n\n    return -Result\n\n\ndef evalSchwefel(parameters):\n    A = 4189.829101\n    Open = 10 * A\n    Result = Open\n    for w in range(10):\n        W = w + 1\n        Result += -parameters[w] * math.sin(math.sqrt(abs(parameters[w])))\n\n    return -Result\n\n\ndef evalQuartic(parameters):\n    Result = 0\n    for w in range(30):\n        W = w + 1\n        Result += W * pow(parameters[w], 4) + random.gauss(0, 1)\n\n    return -Result\n\n\ndef evalFoxHole(parameters):\n    # MIN = 0.998003837794449325873406851315\n    Result = 0.002\n    a = [\n        [-32, -16, 0, 16, 32,\n         -32, -16, 0, 16, 32,\n         -32, -16, 0, 16, 32,\n         -32, -16, 0, 16, 32,\n         -32, -16, 0, 16, 32],\n        [-32, -32, -32, -32, -32,\n         -16, -16, -16, -16, -16,\n         0, 0, 0, 0, 0,\n         16, 16, 16, 16, 16,\n         32, 32, 32, 32, 32]\n    ]\n\n    for w in range(25):\n        W = 1+w\n        D = W\n        for k in range(2):\n            D += pow((parameters[k] - a[k][w]), 6)\n        Result += (1/D)\n\n    Result = 
1/Result\n    return -Result\n\n\ndef Evaluate(genconf, phenotype):\n    evalFunctionName = list(phenotype.keys())[0]\n    parameters = phenotype[evalFunctionName]\n    parameters = [parameters[N] for N in sorted(list(parameters.keys()))]\n\n    evalFunctions = {\n        'quartic': evalQuartic,\n        'foxhole': evalFoxHole,\n        'schwefel': evalSchwefel,\n        'rastrigin': evalRastrigin,\n        'griewangk': evalGriewangk,\n        'rosenbrock': evalRosenbrock\n    }\n    result = evalFunctions[evalFunctionName](parameters)\n\n    return {\n        'relativeProfit': result,\n        'sharpe': 0,\n        'trades': 25,\n        'averageExposure': 0\n    }\n"
  },
  {
    "path": "evaluation/benchmark/generateConfig.py",
    "content": "#!/bin/python\nimport pytoml\n\nNBP = 30\nPRANGE = [-1.28, 1.28]\n\nNBP = 25\nPRANGE = [-65536, 65536]\n\nNBP = 10\nPRANGE = [-500, 500]\n\nNBP = 20\nPRANGE = [-5.12, 5.12]\n\n#NBP = 10\n#PRANGE = [-600, 600]\n\n#NBP = 2\n#PRANGE = [-2.048, 2.048]\n\nPARAMETERS = {}\nfor P in range(NBP):\n    PNAME = 'P%i' % P\n    PARAMETERS.update({PNAME: PRANGE})\n\nTOMLTEXT = pytoml.dumps(PARAMETERS)\nopen('config.toml', 'w').write(TOMLTEXT)\n"
  },
  {
    "path": "evaluation/gekko/API.py",
    "content": "#!/bin/python\nimport os\nimport requests\nimport json\nfrom subprocess import Popen, PIPE\n\n\ndef initializeGekko():  # not used yet.\n    CMD = ['node', gekkoDIR + '/gekko', '--ui']\n    D = Popen(CMD, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n\n    \ndef checkInstance(instanceUrl):\n    try:\n        Request = requests.get(instanceUrl)\n    except Exception:\n        return False\n    if Request.text:\n        return True\n\n\ndef httpPost(URL, data={}, Verbose=True):\n    try:\n        Request = requests.post(URL, json=data)\n        Response = json.loads(Request.text)\n\n    except ConnectionRefusedError:\n        print(\"Error: Gekko comm error! Check your local Gekko instance.\")\n        exit()\n    except Exception as e:\n        if Verbose:\n            print(\"Error: config failure\")\n            print(e)\n            print(URL)\n            print(data)\n        return False\n\n    return Response\n\n\ndef loadHostsFile(HostsFilePath):\n    remoteGekkos = []\n    if os.path.isfile(HostsFilePath):\n        H = open(HostsFilePath).read().split('\\n')\n        for W in H:\n            if W and not '=' in W and not '[' in W:\n                remoteGekkos.append(\"http://%s:3000\" % W)\n    return remoteGekkos\n"
  },
  {
    "path": "evaluation/gekko/__init__.py",
    "content": "#!/bin/python\nimport os\nimport subprocess\n\nfrom .import API\nfrom .import dataset\nfrom .import backtest\nfrom .import datasetOperations\nfrom .statistics import *\nimport pathlib\n\nimport promoterz\n\n\nclass GekkoEvaluator():\n    def __init__(self):\n        pass\n\n\nSettingsFiles = [\n    \"generation\",\n    \"Global\",\n    \"dataset\",\n    #\"indicator\",\n    \"backtest\",\n    \"evalbreak\"\n]\n\n\ndef showBacktestResult(backtestResult, dataset=None):\n    messageBackbone = ''.join([\n        'Test on random candles...  ',\n        'relativeProfit: %.3f \\t',\n        'nbTrades: %.1f\\t',\n        'sharpe: %.2f'\n    ])\n\n    message = messageBackbone % (\n        backtestResult['relativeProfit'],\n        backtestResult['trades'],\n        backtestResult['sharpe']\n    )\n\n    if dataset:\n        message += \"\\n\\t\\t%s\\t%s\" % (dataset.textDaterange(),\n                                     dataset.textSpecifications())\n\n    return message\n\n\ndef parseDatasetInfo(purpose, candlestickDataset):\n    textdaterange = datasetOperations.dateRangeToText(\n        candlestickDataset.daterange)\n    print()\n    Text = \"\\n%s candlestick dataset %s\\n\" % (purpose, textdaterange)\n    Text += candlestickDataset.textSpecifications() + '\\n'\n    return Text\n\n\ndef showPrimaryInfo(Logger, evolutionDatasets, evaluationDatasets):\n    for evolutionDataset in evolutionDatasets:\n        Logger.log(\n            parseDatasetInfo(\"evolution\", evolutionDataset),\n            target=\"Header\"\n        )\n    if evaluationDatasets:\n        for evaluationDataset in evaluationDatasets:\n            Logger.log(\n                parseDatasetInfo(\"evaluation\", evaluationDataset),\n                target=\"Header\"\n            )\n\n\n\nclass GekkoEvaluationPool(promoterz.evaluationPool.EvaluationPool):\n    #def __init__(self, World, Urls, poolsize, individual_info):\n    #    pass\n\n    def ejectURL(self, Index):\n        
self.Urls.pop(Index)\n        self.lasttimes.pop(Index)\n        self.lasttimesperind.pop(Index)\n        self.poolsizes.pop(Index)\n\n    def distributeIndividuals(self, tosimulation):\n        nb_simulate = len(tosimulation)\n        sumtimes = sum(self.lasttimes)\n        # stdtime = sum(self.lasttimes)/len(self.lasttimes)\n        std = nb_simulate / len(self.Urls)\n        # stdTPI = sum(self.lasttimesperind)/len(self.lasttimesperind)\n        #print(stdTPI)\n        if sumtimes:\n            vels = [1 / x for x in self.lasttimes]\n            constant = nb_simulate / sum(vels)\n            proportions = [max(1, x * constant) for x in vels]\n        else:\n            proportions = [std for x in self.Urls]\n        proportions = [int(round(x)) for x in proportions]\n        pC = lambda x: random.randrange(0, len(x))\n        pB = lambda x: x.index(min(x))\n        pM = lambda x: x.index(max(x))\n        while sum(proportions) < nb_simulate:\n            proportions[pB(proportions)] += 1\n            print('+')\n        while sum(proportions) > nb_simulate:\n            proportions[pM(proportions)] -= 1\n            print('-')\n        print(proportions)\n        assert (sum(proportions) == nb_simulate)\n        distribution = []\n        L = 0\n        for P in proportions:\n            distribution.append(tosimulation[L: L + P])\n            L = L + P\n        return distribution\n\n\nEvaluationPool = GekkoEvaluationPool\n\n\ndef ResultToIndividue(result, individue):\n    individue.fitness.values = (result['relativeProfit'], result['sharpe'])\n    individue.trades = result['trades']\n    individue.averageExposure = result['averageExposure'] / 3600000\n\n\ndef showIndividue(evaldata):\n    return \"~ bP: %.3f\\tS: %.3f\\tnbT:%.3f\" % (\n        evaldata['relativeProfit'], evaldata['sharpe'], evaldata['trades']\n    )\n\n\ndef validateSettings(settings):\n    # LOCATE & VALIDATE RUNNING GEKKO INSTANCES FROM CONFIG URLs;\n    possibleInstances = 
settings['Global']['GekkoURLs']\n    validatedInstances = []\n    for instance in possibleInstances:\n        Response = API.checkInstance(instance)\n        if Response:\n            validatedInstances.append(instance)\n            print(\"found gekko @ %s\" % instance)\n        else:\n            print(\"unable to locate %s\" % instance)\n\n    if validatedInstances:\n        settings['Global']['GekkoURLs'] = validatedInstances\n\n    else:\n        print(\"Aborted: No running gekko instances found.\")\n        return False\n\n    GekkoPath = settings['Global']['gekkoPath'] + '/gekko.js'\n    GekkoPath = GekkoPath.replace(\"$HOME\", str(pathlib.Path.home()))\n    # FIX THIS;\n    if False and not os.path.isfile(GekkoPath):\n        print(\n            \"Aborted: gekko.js not found\" + \n            \"on path specified @Settings.py;\\n%s\" % GekkoPath)\n        return False\n\n    return True\n\n\n# DEPRECATED;\ndef launchGekkoChildProcess(settings):\n    gekko_args = [\n        'node',\n        '--max-old-space-size=8192',\n        settings['global']['gekkoPath'] + '/web/server.js',\n    ]\n    gekko_server = subprocess.Popen(gekko_args,\n                                    stdin=subprocess.PIPE,\n                                    stdout=subprocess.PIPE)\n    return gekko_server\n\n\n"
  },
  {
    "path": "evaluation/gekko/backtest.py",
    "content": "#!/bin/python\nfrom .API import httpPost\n\n\ndef interpreteBacktestProfitv1(backtest):\n    return backtest['relativeProfit']\n\n\ndef interpreteBacktestProfitv2(backtest):\n    return backtest['relativeProfit'] - backtest['market']\n\n\ndef interpreteBacktestProfitv3(backtest):\n    if backtest['relativeProfit'] < 0 and backtest['market'] < 0:\n        return backtest['relativeProfit']\n\n    else:\n        return backtest['relativeProfit'] - backtest['market']\n\n\ndef getInterpreterBacktestInfo(v):\n    info = {\n        'v1': \"<shown profit> = <backtest profit>\",\n        'v2': \"<shown profit> = <backtest profit> - <market profit>\",\n        'v3': \"\\nif <backtest profit> > 0: <shown profit> = <backtest profit> - <market profit> \\nelse <shown profit> = <backtest profit> \"\n}\n\n    return \"interpreter %s: \" % v + info[v]\n\n\ndef runBacktest(\n    GekkoInstanceUrl,\n    TradeSetting,\n    Dataset,\n    candleSize=10,\n    gekko_config=None,\n    Debug=False,\n):\n    gekko_config = createConfig(\n        TradeSetting, Dataset.specifications, Dataset.daterange, candleSize,\n        gekko_config, Debug\n    )\n    url = GekkoInstanceUrl + '/api/backtest'\n    fakeReport = {\n        'relativeProfit': 0, 'market': 0, 'trades': 0,\n        'sharpe': 0, 'roundtrips': []\n    }\n    try:\n        result = httpPost(url, gekko_config)\n        # sometime report is False(not dict)\n        if type(result['performanceReport']) is bool:\n            print(\"Warning: performanceReport not found, probable Gekko fail!\")\n            print(Dataset.specifications)\n            # That fail is so rare that has no impact.. 
still happens randomly;\n            return fakeReport  # fake backtest report\n    except Exception as e:\n        print(e)\n        return fakeReport\n\n    # rProfit = result['report']['relativeProfit']\n    # nbTransactions = result['report']['trades']\n    # market = result['report']['market']\n    backtestResult = result['performanceReport']\n    if 'roundtrips' in result.keys():\n        backtestResult['roundtrips'] = result['roundtrips']\n\n    return backtestResult\n\n\ndef Evaluate(backtestconf, Datasets, phenotype, GekkoInstanceUrl):\n    # IndividualToSettings(IND, STRAT) is a function that depends on GA algorithm,\n    # so should be provided;\n    result = [\n        runBacktest(\n            GekkoInstanceUrl,\n            phenotype,\n            Dataset,\n            candleSize=backtestconf.candleSize,\n            Debug=backtestconf.gekkoDebug,\n        )\n        for Dataset in Datasets\n    ]\n    interpreter = {\n        'v1': interpreteBacktestProfitv1,\n        'v2': interpreteBacktestProfitv2,\n        'v3': interpreteBacktestProfitv3,\n    }\n    # --INTERPRETE BACKTEST RESULT;\n    RelativeProfits = [interpreter[backtestconf.interpreteBacktestProfit](R) for R in result]\n    avgTrades = sum([R['trades'] for R in result]) / len(Datasets)\n    mRelativeProfit = sum(RelativeProfits) / len(RelativeProfits)\n    avgSharpe = sum([R['sharpe'] for R in result if R['sharpe']])\n    avgSharpe = avgSharpe / len(Datasets)\n\n    # --CALCULATE EXPOSURE DURATIONS;\n    for R in result:\n        R['totalExposure'] = 0\n        R['averageExposure'] = 0\n        if 'roundtrips' in R.keys():\n            for roundtrip in R['roundtrips']:\n                R['totalExposure'] += roundtrip['duration']\n            R['averageExposure'] = R['totalExposure'] / len(R['roundtrips']) if len(R['roundtrips']) else 0\n\n    avgExposure = sum(R['averageExposure'] for R in result) / len(Datasets)\n    return {\n        'relativeProfit': mRelativeProfit,\n        'sharpe': 
avgSharpe,\n        'trades': avgTrades,\n        'averageExposure': avgExposure\n    }\n\n\ndef createConfig(\n        TradeSetting, Database, DateRange,\n        candleSize=10, gekko_config=None, Debug=False\n):\n    TradeMethod = list(TradeSetting.keys())[0]\n    CONFIG = {\n        \"watch\": Database,\n        \"paperTrader\": {\n            \"fee\": 0.25,  # declare deprecated 'fee' so keeps working w/ old gekko;\n            \"feeMaker\": 0.15,\n            \"feeTaker\": 0.25,\n            \"feeUsing\": 'maker',\n            \"slippage\": 0.05,\n            \"simulationBalance\": {\"asset\": 1, \"currency\": 100},\n            \"reportRoundtrips\": True,\n            \"enabled\": True,\n        },\n        \"tradingAdvisor\": {\n            \"enabled\": True,\n            \"method\": TradeMethod,\n            \"candleSize\": candleSize,  # candleSize: smaller = heavier computation + better possible results;\n            \"historySize\": 10,\n        },\n        TradeMethod: TradeSetting[TradeMethod],\n        \"backtest\": {\"daterange\": DateRange},\n        \"performanceAnalyzer\": {\"riskFreeReturn\": 2, \"enabled\": True},\n        \"valid\": True,\n        \"data\": {\n            \"candleProps\": [\n                \"id\", \"start\", \"open\", \"high\", \"low\", \"close\", \"vwp\", \"volume\", \"trades\"\n            ],\n            \"indicatorResults\": True,\n            \"report\": True,\n            \"roundtrips\": True,\n            \"trades\": True,\n        },\n        \"backtestResultExporter\": {\n            \"enabled\": True,\n            \"writeToDisk\": False,\n            \"data\": {\n                \"stratUpdates\": False,\n                \"roundtrips\": True,\n                \"stratCandles\": False,\n                \"stratCandleProps\": [\n                    \"open\"\n                ],\n                \"trades\": False\n            }\n        }\n    }\n\n    if gekko_config == None:\n        gekko_config = CONFIG\n\n    return 
gekko_config\n"
  },
  {
    "path": "evaluation/gekko/dataset.py",
    "content": "#!/bin/python\nimport random\nimport datetime\nfrom .API import httpPost\n\n\ndef getAllScanset(GekkoURL):\n    URL = GekkoURL + '/api/scansets'\n    RESP = httpPost(URL)\n    return RESP['datasets']\n\n\ndef selectCandlestickData(GekkoURL,\n                          exchange_source=None,\n                          avoidCurrency=None,\n                          minDays=None):\n    DataSetPack = getAllScanset(GekkoURL)\n    specKeys = ['exchange', 'currency', 'asset']\n    scanset = []\n\n    # IF EXCHANGE SPECIFICATIONS ARE TO BRE IGNORED;\n    if 'autoselect' in exchange_source.keys():\n        if exchange_source['autoselect']:\n            exchange_source = None\n\n    # SEARCH CANDIDATE DATASETS AMONG THOSE OBTAINED FROM GEKKO API;\n    for s in DataSetPack:\n        Valid = True\n        for k in specKeys:\n            if exchange_source and s[k] != exchange_source[k]:\n                Valid = False\n        if avoidCurrency and not exchange_source:\n            if s[\"asset\"] == avoidCurrency:\n                Valid = False\n        if Valid:\n            scanset.append(s)\n\n    # IN CASE NO CANDLESTICK DATASET IS COMPATIBLE;\n    if len(scanset) == 0:\n        if exchange_source:\n            raise RuntimeError(\n                \"scanset not available: %s\\n\\tscanset found: %s\" %\n                (exchange_source, DataSetPack)\n            )\n\n        else:\n            raise RuntimeError(\"no scanset available! 
check Gekko candle database.\")\n\n    # SEARCH ON ALL FOUND SCANSETS;\n    for EXCHANGE in scanset:\n        ranges = EXCHANGE['ranges']\n        # no ranges found?\n        if not ranges:\n            # print(\"No scansets found for %s\" % EXCHANGE)\n            continue\n        range_spans = [x['to'] - x['from'] for x in ranges]\n        LONGEST = range_spans.index(max(range_spans))\n        EXCHANGE['max_span'] = range_spans[LONGEST]\n        EXCHANGE['max_span_index'] = LONGEST\n\n    # COMPILE MOST INTERESTING SCANSETS;\n    availableScanset = [exchange for exchange in scanset\n                        if 'max_span' in exchange.keys()]\n\n    exchange_longest_spans = [x['max_span'] for x in availableScanset]\n\n    if minDays is not None:\n        exchange_longest_spans = [\n            span for span in exchange_longest_spans\n            if span > minDays * 24 * 3600\n        ]\n\n    # Without scansets we cannot continue.\n    if not exchange_longest_spans:\n        print(\"FATAL: No scanset available.\")\n        return None\n\n    best_exchange = exchange_longest_spans.index(max(exchange_longest_spans))\n    best_exchange_span =\\\n        availableScanset[best_exchange]['max_span_index']\n    chosenScansetRange =\\\n        availableScanset[best_exchange]['ranges'][best_exchange_span]\n\n    chosenScansetSpecifications = {\n        K: availableScanset[best_exchange][K]\n        for K in availableScanset[best_exchange]\n        if K in specKeys\n    }\n\n    return chosenScansetSpecifications, chosenScansetRange\n\n\ndef getCandles(globalconf, DateRange, Dataset, size=100):\n    base = random.choice(globalconf.GekkoURLs)\n    URL = base + \"/api/getCandles\"\n    CONFIG = {\n        \"watch\": Dataset.specifications,\n        \"daterange\": DateRange,\n        \"adapter\": \"sqlite\",\n        \"sqlite\": {\n            \"path\": \"plugins/sqlite\",\n            \"dataDirectory\": \"history\",\n            \"version\": 0.1,\n            \"dependencies\": 
[{\"module\": \"sqlite3\", \"version\": \"3.1.4\"}],\n        },\n        \"candleSize\": size,\n    }\n    RESULT = httpPost(URL, CONFIG)\n    return RESULT\n\n\ndef getDateRange(Limits, deltaDays=3):\n    DateFormat = \"%Y-%m-%d %H:%M:%S\"\n    deltams = deltaDays * 24 * 60 * 60\n    DateRange = {\n        \"from\": \"%s\" % epochToString(Limits['to'] - deltams),\n        \"to\": \"%s\" % epochToString(Limits['to']),\n    }\n    return DateRange\n\n\ndef getRandomDateRange(Limits, deltaDays):\n    FLms = Limits['from']\n    TLms = Limits['to']\n    deltams = deltaDays * 24 * 60 * 60\n    if deltams > (TLms - FLms):\n        print(\n            \"Fatal: deltaDays on Settings.py set to a value bigger than current dataset.\\n Edit Settings file to fit your chosen candlestick data.\"\n        )\n        exit(1)\n    Start = random.randint(FLms, TLms - deltams) if deltaDays else FLms\n    End = (Start + deltams) if deltaDays else TLms\n    DateRange = {\n        \"from\": \"%s\" % epochToString(Start),\n        \"to\": \"%s\" % epochToString(End)\n    }\n    return DateRange\n\n\ndef epochToString(D):\n    return datetime.datetime.utcfromtimestamp(D).strftime(\n        \"%Y-%m-%d %H:%M:%S\"\n    )\n"
  },
  {
    "path": "evaluation/gekko/datasetOperations.py",
    "content": "#!/bin/python\nimport evaluation\nimport random\n\n\nclass CandlestickDataset():\n    def __init__(self, specifications, daterange):\n        self.daterange = daterange\n        self.specifications = specifications\n\n    def restrain(self, deltaDays):\n        if not deltaDays:\n            return\n\n        deltams = deltaDays * 24 * 60 * 60\n        restrainedInit = self.daterange['to'] - deltams\n        self.daterange['from'] = max(self.daterange['from'], restrainedInit)\n\n    def textDaterange(self):\n        return dateRangeToText(self.daterange)\n\n    def textSpecifications(self):\n        message = \"%s/%s @%s\" % (self.specifications[\"asset\"],\n                                 self.specifications[\"currency\"],\n                                 self.specifications[\"exchange\"])\n\n        return message\n\n    def __str__(self):\n        return self.textSpecification()\n\n\ndef getRandomSectorOfDataset(sourceDataset, deltaDays):\n\n    G = evaluation.gekko.dataset.getRandomDateRange\n    dateRange = G(sourceDataset.daterange, deltaDays)\n    newDataset = CandlestickDataset(sourceDataset.specifications,\n                                    dateRange)\n\n    return newDataset\n\n\ndef getLocaleDataset(World, Type='evolution'):\n\n    localeDataset = []\n    for DS in range(World.conf.backtest.ParallelCandlestickDataset):\n        sourceDataset = random.choice(World.EnvironmentParameters['evolution'])\n\n        newDataset = getRandomSectorOfDataset(sourceDataset,\n                                              World.conf.backtest.deltaDays)\n        localeDataset.append(newDataset)\n\n    return localeDataset\n\n\ndef dateRangeToText(dateRange):\n    def convertDateRange(x):\n        if type(x) == int:\n            return evaluation.gekko.dataset.epochToString(x) \n        else:\n            return x\n\n    Range = [\n        convertDateRange(dateRange[x]) for x in ['from', 'to']\n    ]\n    Text = \"%s to %s\" % (Range[0], Range[1])\n   
 return Text\n"
  },
  {
    "path": "evaluation/gekko/statistics.py",
    "content": "#!/bin/python\n\nfrom deap import tools\nimport numpy as np\n\nepochStatisticsNames = {\n    'avg': 'Average profit',\n    'std': 'Profit variation',\n    'min': 'Minimum profit',\n    'max': 'Maximum profit',\n    'size': 'Population size',\n    'maxsize': 'Max population size',\n    'avgTrades': 'Avg trade number',\n    'sharpe': 'Avg sharpe ratio',\n    'avgExposure': \"Avg exposure time\",\n    'nbElderDies': 'Elder dies count'\n}\n\nperiodicStatisticsNames = {\n    'evaluationScore': \"Evaluation Score\",\n    'evaluationScoreOnSecondary': \"Score on Secondary Dataset\"\n}\n\n\n\ndef compileStats(locale):\n    # --get proper evolution statistics;\n    Stats = locale.stats.compile(locale.population)\n    Stats['dateRange'] = ' '.join([DR.textDaterange()\n                                   for DR in locale.Dataset])\\\n                                       if not locale.EPOCH else None\n    Stats['maxsize'] = locale.POP_SIZE\n    Stats['size'] = len(locale.population)\n    Stats['avgTrades'] = locale.extraStats['avgTrades']\n    Stats['avgExposure'] = locale.extraStats['avgExposure'] \n    #Stats['nbElderDies'] = locale.extraStats['nbElderDies']\n    Stats['sharpe'] = np.mean([x.fitness.values[1] for x in locale.population])\n    Stats['evaluationScoreOnSecondary'] = locale.lastEvaluationOnSecondary\n    Stats['evaluationScore'] = locale.lastEvaluation\n    locale.lastEvaluationOnSecondary = None\n    locale.lastEvaluation = None\n    Stats['id'] = locale.EPOCH\n    locale.EvolutionStatistics.append(Stats)\n    locale.World.logger.write_evolution_logs(\n        locale.EPOCH, locale.EvolutionStatistics, locale.name\n    )\n\n\ndef showStatistics(locale):\n    # show information;\n    Stats = locale.EvolutionStatistics[locale.EPOCH]\n    print(\"EPOCH %i\\t&%i\" % (locale.EPOCH, locale.extraStats['nb_evaluated']))\n    statnames = ['max', 'avg', 'min',\n                 'std', 'size', 'maxsize',\n                 'avgTrades', 'sharpe', 
'avgExposure',\n                 # 'nbElderDies'\n    ]\n    statisticsText = []\n    for s in range(len(statnames)):\n        SNAME = statnames[s]\n        SVAL = Stats[SNAME]\n        currentStatisticsText = \"%s\" % epochStatisticsNames[SNAME]\n        if not SVAL % 1:\n            currentStatisticsText += \" %i\" % SVAL\n        else:\n            currentStatisticsText += \" %.3f\" % SVAL\n        statisticsText.append(currentStatisticsText)\n\n    columnWidth = max([len(STXT) for STXT in statisticsText]) + 3\n    for j in range(0, len(statisticsText), 2):\n        print(''.join(word.ljust(columnWidth) for word in statisticsText[j:j+2]))\n\n    print()\n"
  },
  {
    "path": "exchangerun.csv",
    "content": "EXCHANGE,CURRENCY,ASSET,STRATEGY\nbinance,usdt,btc,RSI_BULL_BEAR_ADX\nbinance,usdt,bcc,RSI_BULL_BEAR_ADX\nbinance,usdt,ltc,RSI_BULL_BEAR_ADX\nbinance,usdt,neo,RSI_BULL_BEAR_ADX\nbinance,usdt,qtum,RSI_BULL_BEAR_ADX\nbinance,usdt,bnb,RSI_BULL_BEAR_ADX\nbinance,usdt,eth,RSI_BULL_BEAR_ADX\n"
  },
  {
    "path": "gekko_evolution.yml",
    "content": "- name: prepare machine software and install Gekko Trading Bot\n  hosts: all\n  remote_user: ec2-user \n  become_method: sudo\n  tasks:\n\n  - name: update cache\n    become: yes\n    command: \"yum update -y\"\n\n  - name: Install environment components\n    become: yes\n\n    shell: yum install -y {{item}}\n    with_items:\n    - git\n    - tmux\n\n  - name: get NODEjs\n    become: yes\n    #shell: \"curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash -\" UBUNTU LINUX;\n    shell: \"curl -sL https://rpm.nodesource.com/setup_8.x | bash -\"\n\n  - name: install NODEjs\n    become: yes\n    command: \"yum install -y nodejs\"\n\n  - name: clone gekko git repo\n    command: \"git clone https://github.com/askmike/gekko\"\n    ignore_errors: yes\n    \n  - name: install gekko nodejs libs\n    command: \"npm install --only=production\"\n    args:\n      chdir: \"gekko\"\n\n  - name: install problematic nodejs lib\n    become: yes\n    command: \"npm install -G sqlite3\"\n\n  - name: edit gekko UI config\n    shell: sed -i 's/{{ item }}/0.0.0.0/' gekko/web/vue/UIconfig.js\n    with_items:\n    - 127.0.0.1\n\n  - name: edit gekko UI config pt2\n    command: \"sed -i 's/localhost/{{ inventory_hostname }}/' gekko/web/vue/UIconfig.js\"\n\n  - copy:\n      src: ~/gekko/history\n      dest: ~/gekko\n\n  - name: run gekko UI\n    command: \"tmux new -d -s Gekko 'node gekko.js --ui; detach \\\\;'\"\n    args:\n      chdir: \"gekko\"\n"
  },
  {
    "path": "japonicus/Settings.py",
    "content": "#!/bin/python\nimport js2py\nfrom pathlib import Path\n\nfrom .configStrategies import cS\nfrom .configIndicators import cI\n\nimport os\nimport pytoml\n\n\nclass makeSettings(dict):\n    def __init__(self, entries):\n        for K in entries.keys():\n            if type(entries[K]) == dict:\n                entries[K] = makeSettings(entries[K])\n        self.__dict__.update(entries)\n        self.update(entries)\n\n\ndef getSettings(SettingsFiles=[], specific=None):\n    HOME = str(Path.home())\n\n    settings = {}\n\n    for SettingsFile in SettingsFiles:\n        settings[SettingsFile] = loadTomlSettings(SettingsFile)\n\n    return settings\n\n    s = {\n        # gekko global settings;\n        'global': loadTomlSettings('global'),\n        # gekko backtest settings;\n        'backtest': loadTomlSettings('backtest'),\n        # evaluation break settings;\n        'evalbreak': loadTomlSettings('evalbreak'),\n        # genetic algorithm settings;\n        'generations': loadTomlSettings('generations'),\n\n        'dataset': loadTomlSettings('dataset'),\n\n        'strategies': cS,\n        'indicators': cI,\n        'skeletons': {\n            'ontrend': {\n                \"SMA_long\": 1000,\n                \"SMA_short\": 50\n            }\n        }\n    }\n\n    if specific is not None:\n        if not specific:\n            return makeSettings(s)\n        else:\n            return makeSettings(s[specific])\n\n    return s\n\n\ndef loadTomlSettings(settingsDivisionName):\n    userSettingsAndDefaultSettings = [\n        '%s.toml' % settingsDivisionName,\n        '_%s.toml' % settingsDivisionName\n    ]\n    for targetFile in userSettingsAndDefaultSettings:\n        filePath = os.path.join('settings', targetFile)\n        if os.path.isfile(filePath):\n            Settings = pytoml.load(open(filePath))\n            return Settings\n\n    exit(\"Failed to load settings! 
%s\" % settingsDivisionName)\n\n\ndef get_configjs(filename=\"example-config.js\"):\n    with open(filename, \"r\") as f:\n        text = f.read()\n    text = text.replace(\"module.exports = config;\",\"config;\")\n    return js2py.eval_js(text).to_dict()\n"
  },
  {
    "path": "japonicus/__init__.py",
    "content": "#!/bin/python\n\nfrom .japonicus import *\nfrom . import options\nfrom . import interface\n"
  },
  {
    "path": "japonicus/configIndicators.py",
    "content": "#!/bin/python\ncI = {\n    \"ADX\": {\"active\": True, \"period\": 14, \"thresholds.up\": 70, \"thresholds.down\": 50},\n    \"ATR\": {\"active\": True, \"period\": 14, \"thresholds.up\": 70, \"thresholds.down\": 50},\n    \"PPO\": {\n        \"active\": True,\n        \"short\": (6, 18),  # short EMA\n        \"long\": (13, 39),  # long EMA\n        \"signal\": (1, 18),  # 100 * (shortEMA - longEMA / longEMA)\n        \"thresholds.down\": (-0.5, 0.),  # trend thresholds\n        \"thresholds.up\": (0., 0.5),  # trend thresholds\n    },\n    \"TSI\": {\n        \"active\": True,\n        \"thresholds.up\": (15, 35),\n        \"thresholds.down\": (-35, -15),\n        \"short\": (3, 12),\n        \"long\": (15, 35),\n    },\n    \"LRC\": {\n        \"active\": True,\n        \"thresholds.up\": (15, 35),\n        \"thresholds.down\": (-35, -15),\n        \"depth\": (3, 18),\n    },\n    \"RSI\": {\n        \"active\": True,\n        \"interval\": (7, 21),  # weight\n        \"thresholds.down\": (15, 45),  # trend thresholds\n        \"thresholds.up\": (45, 140),  # trend thresholds\n    },\n    \"SMMA\": {\n        \"active\": True,\n        \"weight\": (7, 16),\n        \"thresholds.up\": (0, 0.1),\n        \"thresholds.down\": (-0.1, 0),\n    },\n    \"DEMA\": {\n        \"active\": True,\n        \"short\": (7, 15),\n        \"long\": (12, 35),\n        \"thresholds.up\": (0, 0.1),\n        \"thresholds.down\": (-0.1, 0),\n    },\n    \"CCI\": {\n        \"active\": True,\n        \"consistant\": (7, 21),  # constant multiplier. 0.015 gets to around 70% fit\n        \"history\": (45, 135),  # history size, make same or smaller than history\n        \"thresholds.down\": (-150, -50),  # trend thresholds\n        \"thresholds.up\": (50, 150),  # trend thresholds\n        \"thresholds.persistence\": (4, 10),\n    },\n}\n"
  },
  {
    "path": "japonicus/configStrategies.py",
    "content": "#!/bin/python\n\nNEG = lambda v: (-v[1], -v[0])\ncS = {\n    # Define values for strat settings for strategies to be used\n    # on japonicus;\n    # Each value can be a tuple of limits or just a base value.\n    \"ontrend\" : {\n    \"bull_momentum_high\": 80,\n    \"bull_momentum_low\": 60,\n    \"bear_momentum_high\": 50,\n    \"bear_momentuum_low\": 20,\n    \"sec_high\": 70,\n    \"sec_low\": 50\n\n        },\n    \"rsi_bbands\": {\n\n\"NbDevUp\": 2,\n\"NbDevDn\": 2,\n\"TimePeriod\": 9,\n\"rsi_high\": 60,\n\"rsi_low\": 20,\n\"min_hold\": 5,\n\"swing_trade\": 0.5\n},\n    \n\"RSI_BULL_BEAR\" : {\n\n# SMA Trends\n\"SMA_long\": 1000,\n\"SMA_short\": 50,\n\n# BULL\n\"BULL_RSI\": 10,\n\"BULL_RSI_high\":  80,\n\"BULL_RSI_low\" : 60,\n\n# BEAR\n\"BEAR_RSI\": 15,\n\"BEAR_RSI_high\": 50,\n\"BEAR_RSI_low\" : 20\n},\n    \n\"RSI_BULL_BEAR_ADXold\" : {\n\n# SMA Trends\n\"SMA_long\": 1000,\n\"SMA_short\": 50,\n\n# BULL\n\"BULL_RSI\": 10,\n\"BULL_RSI_high\":  80,\n\"BULL_RSI_low\" : 60,\n\n# BEAR\n\"BEAR_RSI\": 15,\n\"BEAR_RSI_high\": 50,\n\"BEAR_RSI_low\" : 20,\n\n# ADX\n\"ADX\": 3,\n\"ADX_high\": 70,\n\"ADX_low\": 50\n},\n    \n\"RSI_BULL_BEAR_ADX\" : {\n\n# SMA Trends\n\"SMA_long\": 1000,\n\"SMA_short\": 50,\n\n# BULL\n\"BULL_RSI\": 10,\n\"BULL_RSI_high\":  80,\n\"BULL_RSI_low\" : 60,\n\n# BEAR\n\"BEAR_RSI\": 15,\n\"BEAR_RSI_high\": 50,\n\"BEAR_RSI_low\" : 20,\n\n# ADX\n\"ADX\": 3,\n\"ADX_high\": 70,\n\"ADX_low\": 50,\n\n\"BULL_MOD_high\": 5,\n\"BULL_MOD_low\": -5,\n\"BEAR_MOD_high\": 15,\n\"BEAR_MOD_low\": -5\n},\n    \n\"Bestone\" :{\n    \"customMACDSettings\": {\n        \"optInFastPeriod\": (3,10),\n        \"optInSlowPeriod\": (20,50),\n        \"optInSignalPeriod\": (5,15)\n    },\n\n    \"customEMAshortSettings\": {\n        \"optInTimePeriod\": (5,15)\n    },\n\n    \"customEMAlongSettings\": {\n        \"optInTimePeriod\": (15,26)\n    },\n\n    \"customSTOCHSettings\": {\n        \"optInFastKPeriod\": (6, 14),\n        \"optInSlowKPeriod\": 
(2,5),\n        \"optInSlowKMAType\": (1,1),\n        \"optInSlowDPeriod\": (2,5),\n        \"optInSlowDMAType\": (1,1)\n    },\n\n    \"customRSISettings\": {\n        \"optInTimePeriod\": (7,20)\n    }\n\n},\n\n    \n    \"PPOTSI\":{\n        \"PPO.short\": (3,16),\n        \"PPO.long\": (12,35),\n        \"PPO.signal\":(3,21),\n        \"PPO.up\": (0., 1),\n        \"PPO.down\": (-1, 0.),\n        \"TSI.up\": (10,40),\n        \"TSI.down\": (-40,-10),\n        \"TSI.short\": (3,18),\n        \"TSI.long\": (10,42),\n        \"persistence\": (1,10)\n    },\n    \"Supertrend\": {\n        \"atrEma\":(1,10),\n        \"bandFactor\": (1,10)\n        },\n    \"PPOLRC\":{\n        \"PPO.short\": (3,12),\n        \"PPO.long\": (15,35),\n        \"PPO.signal\":(3,18),\n        \"PPO.up\": (0., 0.5),\n        \"PPO.down\": (-0.5, 0.),\n        \"LRC.up\": (15,35),\n        \"LRC.down\": (-35,-15),\n        \"LRC.depth\": (3,18),\n        \"persistence\": (1,5)\n    },\n    \"buyatsellat\": {\n        'buyat': (1.03,1.20),\n\t'sellat': (0.92, 0.97), \n\t'stop_loss_pct': (0.87, 0.95), \n\t'sellat_up': (1.01,1.20)\n    },\n    \"buyatsellatPPO\": {\n        'buyat': (1.03,1.20),\n        'sellat': (0.92, 0.97), \n        'stop_loss_pct': (0.87, 0.95), \n        'sellat_up': (1.01,1.20),\n        \"short\": (6,18), # short EMA\n        \"long\": (13,39), # long EMA\n        \"signal\": (1,18), # 100 * (shortEMA - longEMA / longEMA)\n        \"thresholds.down\": (-0.5,0.), # trend thresholds\n        \"thresholds.up\": (0.,0.5), # trend thresholds\n        \"thresholds.persistence\": (2,10), # trend duration(count up by tick) thresholds\n    },\n    \"DEMA\":{\n        \"short\": (1,10), # short EMA\n        \"long\": (20,50), # long EMA\n        \"thresholds.down\": (-0.5,0.1), # trend thresholds\n        \"thresholds.up\": (-0.1,0.5), # trend thresholds\n    },\n    \"MACD\":{\n        \"short\": (1,10), # short EMA\n        \"long\": (20,50), # long EMA\n        \"signal\": 
(9,18), # shortEMA - longEMA diff\n        \"thresholds.down\": (-0.5,0.), # trend thresholds\n        \"thresholds.up\": (0.,0.5), # trend thresholds\n        \"thresholds.persistence\": (2,10), # trend duration(count up by tick) thresholds\n    },\n    \"PPO\":{\n        \"short\": (6,18), # short EMA\n        \"long\": (13,39), # long EMA\n        \"signal\": (1,18), # 100 * (shortEMA - longEMA / longEMA)\n        \"thresholds.down\": (-0.5,0.), # trend thresholds\n        \"thresholds.up\": (0.,0.5), # trend thresholds\n        \"thresholds.persistence\": (2,10), # trend duration(count up by tick) thresholds\n    },\n    # Uses one of the momentum indicators but adjusts the thresholds when PPO is bullish or bearish\n    # Uses settings from the ppo and momentum indicator config block\n    \"varPPO\":{ # TODO: merge PPO config\n        \"short\": (6,18), # short EMA\n        \"long\": (13,39), # long EMA\n        \"signal\": (1,18), # 100 * (shortEMA - longEMA / longEMA)\n        \"thresholds.down\": (-0.5,0.), # trend thresholds\n        \"thresholds.up\": (0.,0.5), # trend thresholds\n        \"thresholds.persistence\": (0,4), # trend duration(count up by tick) thresholds\n        \"momentum\": (0, 2.99999), # index of [\"RSI\", \"TSI\", \"UO\"]\n        # new threshold is default threshold + PPOhist * PPOweight\n        \"weightLow\": (60, 180),\n        \"weightHigh\": (-180, -60),\n    },\n    \"RSI\":{\n        \"interval\": (7,21), # weight\n        \"thresholds.low\": (15,45), # trend thresholds\n        \"thresholds.high\": (45,140), # trend thresholds\n        \"thresholds.persistence\": (4,10), # trend duration(count up by tick) thresholds\n    },\n    \"StochRSI\":{\n        \"interval\": (7,21), # weight\n        \"thresholds.low\": (15,45), # trend thresholds\n        \"thresholds.high\": (45,140), # trend thresholds\n        \"thresholds.persistence\": (4,10), # trend duration(count up by tick) thresholds\n    },\n    \"CCI\":{\n        
\"consistant\": (7,21), # constant multiplier. 0.015 gets to around 70% fit\n        \"history\": (45,135), # history size, make same or smaller than history\n        \"thresholds.down\": (-150,-50), # trend thresholds\n        \"thresholds.up\": (50,150), # trend thresholds\n        \"thresholds.persistence\": (4,10), # trend duration(count up by tick) thresholds\n    },\n    \"UO\":{\n        \"first.weight\": (2,8), # \n        \"first.period\": (4.5,14), # \n        \"second.weight\": (1,4), # \n        \"second.period\": (7,28), # \n        \"third.weight\": (0.5,2), # \n        \"third.period\": (14,56), # \n        \"thresholds.low\": (15,45), # trend thresholds\n        \"thresholds.high\": (45,140), # trend thresholds\n        \"thresholds.persistence\": (0,4), # trend duration(count up by tick) thresholds\n    },\n    \"MRBB\": {\n        \"short\": (3, 12),\n        \"long\": (12, 32),\n        \"signal\": (6, 23),\n        \"interval\": (7, 23),\n        \"crosspersistence\": (7, 30),\n        \"macdhigh\": (0.1,0.6),\n        \"macdlow\": (-0.6,-0.1),\n        \"rsihigh\": (30,100),\n        \"rsilow\": (1,35),\n        \"bbands.TimePeriod\": (16,22),\n        \"bbands.NbDevUp\": (1,3),\n        \"bbands.NbDevDn\": (1,3),\n        \"bbands.MAType\": (1,3)\n\t}\n}\n"
  },
  {
    "path": "japonicus/evolution_generations.py",
    "content": "#!/bin/python\nimport json\nimport time\nimport sys\n\nimport promoterz\nimport evaluation\n\nfrom . import interface\n\nfrom .Settings import getSettings, makeSettings\nimport stratego\nfrom functools import partial\n\nimport evaluation.gekko.datasetOperations as datasetOperations\n\n\nStrategyFileManager = None\n\n\n# TEMPORARY ASSIGNMENT OF EVAL FUNCTIONS; SO THINGS REMAIN ¿SANE;\ndef indicatorEvaluate(\n    StrategyFileManager,\n    constructPhenotype,\n    genconf,\n    Datasets,\n    Individual,\n    gekkoUrl,\n):\n    phenotype = constructPhenotype(Individual)\n    StratName = StrategyFileManager.checkStrategy(phenotype)\n    phenotype = {StratName: phenotype}\n    SCORE = evaluation.gekko.backtest.Evaluate(\n        genconf, Datasets, phenotype, gekkoUrl\n    )\n    return SCORE\n\n\ndef standardEvaluate(constructPhenotype,\n                     genconf, Datasets, Individual, gekkoUrl):\n    phenotype = constructPhenotype(Individual)\n    phenotype = {Individual.Strategy: phenotype}\n    SCORE = evaluation.gekko.backtest.Evaluate(\n        genconf, Datasets, phenotype, gekkoUrl\n    )\n    return SCORE\n\n\ndef benchmarkEvaluate(constructPhenotype,\n                      genconf, Datasets, Individual, gekkoUrl):\n    phenotype = constructPhenotype(Individual)\n    phenotype = {Individual.Strategy: phenotype}\n    SCORE = evaluation.benchmark.benchmark.Evaluate(\n        genconf, phenotype\n    )\n    return SCORE\n\n\ndef grabDatasets(conf):\n    # CHECK HOW MANY EVOLUTION DATASETS ARE SPECIFIED AT SETTINGS;\n    evolutionDatasetNames = ['dataset_source']\n    evolutionDatasets = []\n    for DS in range(1, 100):\n        datasetConfigName = 'dataset_source%i' % DS\n        if datasetConfigName in conf.dataset.__dict__.keys():\n            evolutionDatasetNames.append(datasetConfigName)\n\n    # --GRAB PRIMARY (EVOLUTION) DATASETS\n    for evolutionDatasetName in evolutionDatasetNames:\n        D = 
evaluation.gekko.dataset.selectCandlestickData(\n            conf.Global.GekkoURLs[0],\n            exchange_source=conf.dataset.__dict__[evolutionDatasetName],\n            minDays=conf.backtest.deltaDays\n        )\n        evolutionDatasets.append(datasetOperations.CandlestickDataset(*D))\n        try:\n            evolutionDatasets[-1].restrain(conf.dataset.dataset_span)\n        except Exception:\n            print(\n                'dataset_span not configured for evolutionDatasetName. skipping...')\n\n    # --GRAB SECONDARY (EVALUATION) DATASET\n    try:\n        Avoid = evolutionDatasets[0].specifications['asset']\n        D = evaluation.gekko.dataset.selectCandlestickData(\n            conf.Global.GekkoURLs[0],\n            exchange_source=conf.dataset.eval_dataset_source,\n            avoidCurrency=None,\n            minDays=conf.backtest.deltaDays\n        )\n        if D is not None:\n            evaluationDatasets = [datasetOperations.CandlestickDataset(*D)]\n            evaluationDatasets[0].restrain(conf.dataset.eval_dataset_span)\n        else:\n            evaluationDatasets = []\n    except RuntimeError:\n        evaluationDatasets = []\n        print(\"Evaluation dataset not found.\")\n\n    return evolutionDatasets, evaluationDatasets\n\n\ndef Generations(\n        EvaluationModule,\n        japonicusOptions,\n        EvaluationMode,\n        settings,\n        options,\n        web=None):\n\n    # --LOAD SETTINGS;\n    conf = makeSettings(settings)\n\n    # --APPLY COMMAND LINE GENCONF SETTINGS;\n    for parameter in conf.generation.__dict__.keys():\n        if parameter in options.__dict__.keys():\n            if options.__dict__[parameter] != None:\n                conf.generation[parameter] = options.__dict__[parameter]\n\n    GenerationMethod = promoterz.functions.selectRepresentationMethod(\n        japonicusOptions[\"GenerationMethod\"]\n    )\n\n    # --MANAGE Evaluation Modes;\n    if EvaluationMode == 'indicator':\n        # global 
StrategyFileManager\n        StrategyFileManager = stratego.gekko_strategy.StrategyFileManager(\n            conf.Global.gekkoPath, conf.indicator\n        )\n        Evaluate = partial(indicatorEvaluate, StrategyFileManager)\n        Strategy = options.skeleton\n    # --for standard methods;\n    else:\n        Strategy = EvaluationMode\n        if options.benchmarkMode:\n            Evaluate = benchmarkEvaluate\n            evolutionDatasets, evaluationDatasets = [], []\n            conf.gen.minimumProfitFilter = None\n        else:\n            Evaluate = standardEvaluate\n            evolutionDatasets, evaluationDatasets = grabDatasets(\n                conf\n            )\n\n    # -- PARSE TARGET PARAMETERS\n    TargetParameters = promoterz.parameterOperations.flattenParameters(\n        japonicusOptions[\"TargetParameters\"])\n    TargetParameters = promoterz.parameterOperations.parameterValuesToRangeOfValues(\n        TargetParameters, conf.generation.parameter_spread\n    )\n    GlobalTools = GenerationMethod.getToolbox(Strategy,\n                                              conf.generation,\n                                              TargetParameters)\n    RemoteHosts = evaluation.gekko.API.loadHostsFile(conf.Global.RemoteAWS)\n    conf.Global.GekkoURLs += RemoteHosts\n    if RemoteHosts:\n        print(\"Connected Remote Hosts:\\n%s\" % ('\\n').join(RemoteHosts))\n        if EvaluationMode == 'indicator':\n            exit('Indicator mode is yet not compatible with multiple hosts.')\n\n    # --INITIALIZE LOGGER;\n    todayDate = time.strftime(\"%Y_%m_%d-%H.%M.%S\", time.gmtime())\n    if evolutionDatasets:\n        ds_specs = evolutionDatasets[0].specifications\n        logfilename = \"%s-%s-%s-%s-%s\" % (\n            Strategy,\n            ds_specs['exchange'],\n            ds_specs['currency'],\n            ds_specs['asset'],\n            todayDate\n        )\n    else:\n        logfilename = \"benchmark%s\" % todayDate\n    Logger = 
promoterz.logger.Logger(logfilename)\n\n    # --PRINT RUNTIME ARGS TO LOG HEADER;\n    ARGS = ' '.join(sys.argv)\n    Logger.log(ARGS, target='Header')\n\n    # --SHOW PARAMETER INFO;\n    if Strategy:\n        Logger.log(\"Evolving %s strategy;\\n\" % Strategy)\n    Logger.log(\"evaluated parameters ranges:\", target=\"Header\")\n    for k in TargetParameters.keys():\n        Logger.log(\n            \"%s%s%s\\n\" % (k, \" \" * (30 - len(k)), TargetParameters[k]),\n            target=\"Header\"\n        )\n\n    # --LOG CONFIG INFO;\n    configInfo = json.dumps(conf.generation.__dict__, indent=4)\n    Logger.log(configInfo, target=\"Header\", show=False)\n\n    # --SHOW DATASET INFO;\n    EvaluationModule.showPrimaryInfo(Logger,\n                                     evolutionDatasets,\n                                     evaluationDatasets)\n\n    # --INITIALIZE WORLD WITH CANDLESTICK DATASET INFO; HERE THE GA KICKS IN;\n    GlobalTools.register('Evaluate', Evaluate,\n                         GlobalTools.constructPhenotype, conf.backtest)\n    GlobalTools.register(\"ApplyResult\", EvaluationModule.ResultToIndividue)\n    GlobalTools.register(\"showIndividue\", EvaluationModule.showIndividue)\n\n    # --THIS LOADS A DATERANGE FOR A LOCALE;\n    if options.benchmarkMode:\n        def onInitLocale(World):\n            Dataset = [\n                datasetOperations.CandlestickDataset(\n                    {},\n                    {\n                        'from': 0,\n                        'to': 0\n                    }\n                )]\n            return Dataset\n    else:\n        def onInitLocale(World):\n            Dataset = datasetOperations.getLocaleDataset(World)\n            return Dataset\n\n    # Select run loops;\n    populationLoops = [promoterz.sequence.locale.standard_loop.execute]\n    worldLoops = [promoterz.sequence.world.parallel_world.execute]\n\n    # Initalize World;\n    World = promoterz.world.World(\n        GlobalTools=GlobalTools,\n   
     populationLoops=populationLoops,\n        worldLoops=worldLoops,\n        conf=conf,\n        TargetParameters=TargetParameters,\n        EnvironmentParameters={\n            'evolution':  evolutionDatasets,\n            'evaluation': evaluationDatasets\n        },\n        onInitLocale=onInitLocale,\n        web=web,\n    )\n    World.logger = Logger\n    World.EvaluationStatistics = []\n\n    World.EvaluationModule = EvaluationModule\n    World.seedEnvironment()\n\n    World.logger.updateFile()\n\n    # INITALIZE EVALUATION PROCESSING POOL\n    World.parallel = World.EvaluationModule.EvaluationPool(\n        World,\n        conf.Global.GekkoURLs,\n        conf.backtest.ParallelBacktests,\n        conf.generation.showIndividualEvaluationInfo,\n        )\n\n    # --GENERATE INITIAL LOCALES;\n    for l in range(conf.generation.NBLOCALE):\n        World.generateLocale()\n\n    # --RUN EPOCHES;\n    while World.EPOCH < World.conf.generation.NBEPOCH:\n        World.runEpoch()\n        if conf.evalbreak.evaluateSettingsPeriodically and not options.benchmarkMode:\n            if not World.EPOCH % conf.evalbreak.evaluateSettingsPeriodically:\n                promoterz.evaluationBreak.showResults(World)\n        if not World.EPOCH % 10:\n            print(\"Total Evaluations: %i\" % World.totalEvaluations)\n\n    # RUN ENDS. SELECT INDIVIDUE, LOG AND PRINT STUFF;\n    # FinalBestScores.append(Stats['max'])\n    print(World.EnvironmentParameters)\n    # After running EPOCHs, select best candidates;\n    if not options.benchmarkMode:\n        promoterz.evaluationBreak.showResults(World)\n    print(\"\")\n    print(\"\\t\\t.RUN ENDS.\")\n"
  },
  {
    "path": "japonicus/halt.py",
    "content": "#!/bin/python\n\nimport signal\nimport sys\n\nimport psutil\nimport os\nimport time\n\nM = sys.version_info.major\nm = sys.version_info.minor\nif not M >= 3 or not m >= 6:\n    message = 'check your python version before running japonicus.'\n    message += ' Python>=3.6 is required. Python==%i.%i detected.' % (M, m)\n    print(message)\n    exit(1)\n\n\nAware = False\n\n\ndef userExit(x, y):\n\n    parent = psutil.Process(os.getpid())\n    global Aware\n    if not Aware:\n        print(\"\\n\\nAborted by user. (SIGINT)\\n\\n\")\n        Aware = True\n    try:\n        for child in parent.children(recursive=True): \n            child.kill()\n        time.sleep(2)\n        exit(0)\n    except (SystemExit):\n        raise\n\n\nsignal.signal(signal.SIGINT, userExit)\n"
  },
  {
    "path": "japonicus/interface.py",
    "content": "#!/bin/python\nimport evaluation\n\n\n\ndef showTitleDisclaimer(backtestsettings, VERSION):\n    TITLE = \"\"\"\n        ██╗ █████╗ ██████╗  ██████╗ ███╗   ██╗██╗ ██████╗██╗   ██╗███████╗\n        ██║██╔══██╗██╔══██╗██╔═══██╗████╗  ██║██║██╔════╝██║   ██║██╔════╝\n        ██║███████║██████╔╝██║   ██║██╔██╗ ██║██║██║     ██║   ██║███████╗\n   ██   ██║██╔══██║██╔═══╝ ██║   ██║██║╚██╗██║██║██║     ██║   ██║╚════██║\n   ╚█████╔╝██║  ██║██║     ╚██████╔╝██║ ╚████║██║╚██████╗╚██████╔╝███████║\n    ╚════╝ ╚═╝  ╚═╝╚═╝      ╚═════╝ ╚═╝  ╚═══╝╚═╝ ╚═════╝ ╚═════╝ ╚══════╝\n    \"\"\"\n\n    TITLE += \"\\t EVOLUTIONARY GENETIC ALGORITHMS\"\n\n    try:\n        print(TITLE, end=\"\")\n    except UnicodeEncodeError or SyntaxError:\n        print(\"\\nJAPONICUS\\n\")\n    print('\\t' * 4 + 'v%.2f' % VERSION)\n    print()\n\n    profitDisclaimer = \"The profits reported here depends on backtest interpreter function;\"\n    interpreterFuncName = backtestsettings['interpreteBacktestProfit']\n    interpreterInfo = evaluation.gekko.backtest.getInterpreterBacktestInfo(\n        interpreterFuncName)\n\n    print(\"%s \\n\\t%s\\n\" % (profitDisclaimer, interpreterInfo))\n\n"
  },
  {
    "path": "japonicus/japonicus.py",
    "content": "#!/bin/python\n\nfrom . import halt, Settings, interface\n\nfrom time import sleep\nimport random\nfrom threading import Thread\n\nfrom .evolution_generations import Generations\n\n\nimport datetime\nimport os\n\nimport waitress\n\nimport promoterz\nfrom version import VERSION\n\n\ndef launchWebEvolutionaryInfo():\n    print(\"WEBSERVER MODE\")\n    webpageTitle = \"japonicus evolutionary statistics - v%.2f\" % VERSION\n    webApp, webServer = promoterz.webServer.core.build_server(webpageTitle)\n\n    webServerProcess = Thread(\n        target=waitress.serve,\n        kwargs={\n            \"app\": webServer,\n            \"listen\": \"0.0.0.0:8182\"\n        }\n    )\n\n    webServerProcess.start()\n    return webApp\n\n\ndef buildSettingsOptions(optionparser, settingSubsets):\n    settings = Settings.getSettings(SettingsFiles=settingSubsets)\n\n    # PARSE GENCONF & DATASET COMMANDLINE ARGUMENTS;\n    for settingSubset in settingSubsets:\n        parser = promoterz.metaPromoterz.generateCommandLineArguments(\n            optionparser,\n            settings[settingSubset])\n\n    options, args = parser.parse_args()\n    for settingSubset in settingSubsets:\n        settings[settingSubset] =\\\n            promoterz.metaPromoterz.applyCommandLineOptionsToSettings(\n            options,\n            settings[settingSubset]\n        )\n\n    return settings, options\n\n\ndef loadEvaluationModule():\n\n    req = [\n        \"validateSettings\",\n        \"showStatistics\"\n    ]\n    pass\n\n\nclass JaponicusSession():\n\n    def __init__(self, EvaluationModule, settings, options):\n\n        # ADDITIONAL MODES;\n        markzero_time = datetime.datetime.now()\n\n        print()\n\n        # show title;\n        interface.showTitleDisclaimer(settings['backtest'], VERSION)\n\n        self.web_server = launchWebEvolutionaryInfo()\\\n            if options.spawn_web else None\n        sleep(1)\n\n        if not 
EvaluationModule.validateSettings(settings):\n            exit(1)\n\n        # --SELECT STRATEGY;\n        if options.random_strategy:\n            Strategy = \"\"\n            GekkoStrategyFolder = os.listdir(settings['Global']['gekkoPath'] + '/strategies')\n            while Strategy + '.js' not in GekkoStrategyFolder:\n                if Strategy:\n                    print(\n                        \"Strategy %s descripted on settings but not found on strat folder.\" %\n                        Strategy\n                    )\n                Strategy = random.choice(list(settings['strategies'].keys()))\n                print(\"> %s\" % Strategy)\n        elif options.strategy:\n            Strategy = options.strategy\n        elif not options.skeleton:\n            print(\"No strategy specified! Use --strat or go --help\")\n            exit(1)\n\n        # --LAUNCH GENETIC ALGORITHM;\n        if options.genetic_algorithm:\n\n            japonicusOptions = {\n                \"GenerationMethod\": None,\n                \"TargetParameters\": None\n            }\n\n            japonicusOptions[\"GenerationMethod\"] =\\\n                'chromosome' if options.chromosome_mode else 'oldschool'\n\n            if options.skeleton:\n                EvaluationMode = 'indicator'\n                AllIndicators = Settings.getSettings()['indicators']\n                TargetParameters = Settings.getSettings()['skeletons'][options.skeleton]\n                for K in AllIndicators.keys():\n                    if type(AllIndicators[K]) != dict:\n                        TargetParameters[K] = AllIndicators[K]\n                    elif AllIndicators[K]['active']:\n                        TargetParameters[K] = AllIndicators[K]\n                        TargetParameters[K]['active'] = (0, 1)\n\n                japonicusOptions[\"TargetParameters\"] = TargetParameters\n\n                if not TargetParameters:\n                    print(\"Bad configIndicators!\")\n                    
exit(1)\n            else:\n                EvaluationMode = Strategy\n                # READ STRATEGY PARAMETER RANGES FROM TOML;\n                try:\n                    TOMLData = promoterz.TOMLutils.preprocessTOMLFile(\n                        \"strategy_parameters/%s.toml\" % Strategy\n                    )\n                except FileNotFoundError:\n                    print(\"Failure to find strategy parameter rules for \" +\n                          \"%s at ./strategy_parameters\" % Strategy)\n                    gekkoParameterPath = \"%s/config/strategies/%s.toml\" %\\\n                                         (settings['Global']['gekkoPath'], Strategy)\n                    print(\"Trying to locate strategy parameters at %s\" %\n                          gekkoParameterPath)\n\n                    TOMLData = promoterz.TOMLutils.preprocessTOMLFile(\n                        gekkoParameterPath)\n\n                japonicusOptions[\"TargetParameters\"] =\\\n                    promoterz.TOMLutils.TOMLToParameters(TOMLData)\n\n            # RUN ONE EQUAL INSTANCE PER REPEATER NUMBER SETTINGS,\n            # SEQUENTIALLY...\n            for s in range(options.repeater):\n                Generations(\n                    EvaluationModule,\n                    japonicusOptions,\n                    EvaluationMode,\n                    settings,\n                    options,\n                    web=self.web_server\n                )\n\n        deltatime = datetime.datetime.now() - markzero_time\n        print(\"Run took %i seconds.\" % deltatime.seconds)\n        if options.spawn_web:\n            print('Statistics info server still runs...')\n\n\n"
  },
  {
    "path": "japonicus/options.py",
    "content": "\nimport optparse\n\nparser = optparse.OptionParser()\nparser.add_option(\n    '-g', '--genetic', dest='genetic_algorithm', action='store_true', default=False,\n    help=\"Genetic Algorithm evolution mode.\"\n)\nparser.add_option(\n    '-c', '--chromosome', dest='chromosome_mode', action='store_true', default=False,\n    help=\"Alternative internal representation of parameters for Genetic Algorithm mode.\"\n)\nparser.add_option(\n    '-b', '--bayesian', dest='bayesian_optimization', action='store_true', default=False,\n    help='Bayesian evolution mode.'\n)\nparser.add_option(\n    '-k', '--gekko', dest='spawn_gekko', action='store_true', default=False,\n    help=\"Launch gekko instance.\"\n\n)\nparser.add_option(\n    '-r', '--random', dest='random_strategy', action='store_true', default=False,\n    help=\"Run on random strategy.\"\n)\nparser.add_option(\n    '-e', '--benchmark', dest='benchmarkMode', action='store_true',\n    default=False,\n    help=\"Run GA benchmark mode. Strategy names are restricted to specific strats.\"\n)\nparser.add_option(\n    '-w', '--web', dest='spawn_web', action='store_true', default=False,\n    help=\"Launch japonicus web server showing evolutionary statistics.\"\n)\nparser.add_option('--repeat <x>', dest='repeater', type=int, default=1)\nparser.add_option('--strat <strat>', dest='strategy', default=None)\nparser.add_option('--skeleton <skeleton>', dest='skeleton', default=None)\n"
  },
  {
    "path": "japonicus-run",
    "content": "#!/bin/python\nimport os\n\nimport japonicus\nimport evaluation\nos.chdir(os.path.dirname(os.path.realpath(__file__)))\n\n\nsettings, options = japonicus.buildSettingsOptions(\n    japonicus.options.parser,\n    evaluation.gekko.SettingsFiles\n)\njaponicus.JaponicusSession(evaluation.gekko, settings, options)\n"
  },
  {
    "path": "jlivetrader.py",
    "content": "#!/bin/python\nimport os\nimport optparse\nimport json\n\nimport livetrader.exchangeMonitor\nimport livetrader.gekkoTrigger\nimport livetrader.gekkoChecker\n\ntry:\n    import livetrader.strategyRanker\nexcept Exception:\n    pass\n\nparser = optparse.OptionParser()\n\nparser.add_option('-b', '--balance',\n                  dest='balanceChecker', action='store_true', default=False)\n\nparser.add_option('-t', '--trigger <strategy>',\n                  dest='botTrigger', type='str', default='')\n\nparser.add_option('-c', dest='runningBotChecker',\n                  action='store_true', default=False)\n\nparser.add_option('-l', dest='tradingBot', action='store_true',\n                  default=False)\n\nparser.add_option('--candleSize <cs>',\n                  dest='candleSize', type='int', default=5)\n\nparser.add_option('--strat <strategy>', dest='strategy',\n                  type='str', default='')\n\nparser.add_option('--param <parameters>', dest='alternativeParameters',\n                  type='str', default=None)\n\nparser.add_option('-k', dest='killGekkoBots', action='store_true',\n                  default=False,\n                  help='Destroy all running gekko bot instances.')\n\nparser.add_option('-s', dest='viewLastTrades', action='store_true',\n                  default=False,\n                  help='Show last trades done by bots.')\n\noptions, args = parser.parse_args()\n\n\nif __name__ == '__main__':\n    os.chdir(os.path.dirname(os.path.realpath(__file__)))\n    exchange = livetrader.exchangeMonitor.Exchange('binance')\n\n    if options.balanceChecker:\n        totalUSD = exchange.getUserBalance()\n\n        print(\"net weight at %s: US$T%.2f\" % (\n            exchange.name,\n            totalUSD)\n        )\n\n    if options.botTrigger:\n        allPairs = exchange.getAssets()\n        assetCurrencyPairs = exchange.parseAssets(allPairs)\n        Stratlist = [options.botTrigger]\n\n        exchangeConfPath =\\\n            
exchange.conf.binanceAssetCurrencyTargetFilePath\n        if exchangeConfPath:\n            exchangeMarketData = exchange.generateMarketsJson(\n                assetCurrencyPairs)\n            exchangeConfPath = os.path.join(exchangeConfPath,\n                                            'binance-markets.json')\n\n            with open(exchangeConfPath, 'w') as F:\n                json.dump(exchangeMarketData, F, indent=2)\n\n        livetrader.gekkoTrigger.launchBatchTradingBots(\n            assetCurrencyPairs,\n            Stratlist,\n            options\n        )\n\n    if options.runningBotChecker:\n        ranker = livetrader.strategyRanker.strategyRanker()\n        ranker.loadStrategyRankings()\n        userOrderHistory = exchange.getRecentOrders()\n        for M in userOrderHistory.keys():\n            marketOrderHistory = userOrderHistory[M]\n            if marketOrderHistory:\n                information = json.dumps(marketOrderHistory, indent=2)\n                print(information)\n\n        livetrader.gekkoChecker.checkGekkoRunningBots(exchange,\n                                                      ranker, options)\n    if options.killGekkoBots:\n        livetrader.gekkoChecker.stopGekkoBots()\n\n    if options.viewLastTrades:\n        Orders = exchange.getRecentOrders()\n        print(json.dumps(Orders, indent=2))\n\n\n"
  },
  {
    "path": "livetrader/exchangeMonitor.py",
    "content": "#!/bin/python\nimport ccxt\nimport json\nfrom japonicus import Settings\nimport time\n\n\nclass Exchange():\n    def __init__(self, name):\n        self.name = name\n        self.conf = Settings.makeSettings(Settings.loadTomlSettings(name))\n        secret = open(self.conf.credentialsFilePath).read()\n        secret = secret.split('\\n')\n        self.API = ccxt.binance({\n            'apiKey': secret[0],\n            'secret': secret[1]\n        })\n        self.API.load_markets()\n\n    def getCotations(self):\n        return self.fetchAssetPrices(self.getMarketsOfCurrency())\n\n    def parseAsset(self, Asset):\n        P = [float(Asset[code]) for code in ['free', 'locked']]\n        return P[0], P[1]\n\n    def fetchAssetPrices(self, Symbols):\n        Prices = {}\n        for Symbol in Symbols:\n            Cotation = self.API.fetch_ticker(Symbol)\n            Prices[Symbol] = float(Cotation['info']['lastPrice'])\n\n        return Prices\n\n    def getAveragePrices(self):\n        Cotations = self.getCotations()\n        AllCotations = list(Cotations.keys())\n\n        averagePrices = sum([Cotations[S] for S in AllCotations]) / len(AllCotations)\n        return averagePrices\n\n    def getMarketsOfCurrency(self, currency='USDT'):\n        return [S for S in self.API.symbols if '/%s' % currency in S]\n\n    def getUserBalance(self, Verbose=False):\n        Balance = self.API.fetch_balance()['info']['balances']\n        totalUSD = 0\n        Cotations = self.getCotations()\n\n        for Asset in Balance:\n            Free, Locked = self.parseAsset(Asset)\n            if Free or Locked:\n                if Verbose:\n                    print(Asset)\n                if Asset['asset'] == 'USDT':\n                    Symbol = 'USDT'\n                    totalAsset = Free + Locked\n                    assetValue = totalAsset\n                    if Verbose:\n                        print(\"%.2f USDT\" % totalAsset)\n                else:\n             
       Symbol = '%s/USDT' % Asset['asset']\n                    if Symbol in self.API.symbols:\n                        price = Cotations[Symbol]\n                        if Verbose:\n                            print(\"%s price %.2f\" % (Asset['asset'], price))\n                        totalAsset = Free + Locked\n                        assetValue = (totalAsset * price)\n                    else:\n                        continue\n\n                totalUSD += assetValue\n                if Verbose:\n                    print('--')\n                    print(totalAsset)\n                    print(assetValue)\n                    print(totalUSD)\n                    print()\n\n        return totalUSD\n\n    def getAssets(self):\n        Assets = [A for A in self.API.symbols if 'USDT' in A]\n        return Assets\n\n    def parseAssets(self, assets):\n        LIST = []\n        for Asset in assets:\n            N = Asset.split('/')\n            A = {\n                'EXCHANGE': self.name,\n                'ASSET': N[0],\n                'CURRENCY': N[1]\n            }\n            LIST.append(A)\n\n        return LIST\n\n    def generateMarketsJson(self, Assets):\n        Assets = self.getAssets()\n        marketData = []\n        assetList = []\n        exchangeAssetInfo = self.API.publicGetExchangeInfo()['symbols']\n\n        for Asset in Assets:\n            pair = Asset.split('/')\n            assetList.append(pair[0])\n            pair.reverse()\n            orderInfo = None\n            for pairInfo in exchangeAssetInfo:\n                if pairInfo['symbol'] == Asset.replace('/', ''):\n                    allFilters = {}\n\n                    for Filter in pairInfo['filters']:\n                        del Filter['filterType']\n                        allFilters.update(Filter)\n\n                    orderInfo = {\n                        \"amount\": allFilters['minQty'],\n                        \"price\": allFilters['minPrice'],\n                        
\"order\": 1\n                    }\n\n                    break\n\n            if orderInfo is None:\n                print(\"Failed to grab data for %s\" % Asset)\n                continue\n\n            pairEntry = {\n                \"pair\": pair,\n                \"minimalOrder\": orderInfo\n            }\n\n            marketData.append(pairEntry)\n\n        fullMarketData = {\n            \"assets\": assetList,\n            \"currencies\": [\"USDT\"],\n            \"markets\": marketData\n        }\n\n        return fullMarketData\n\n    def getRecentOrders(self, pastTimeRangeDays=2):\n        userOrderHistory = {}\n        for Market in self.getAssets():\n            pastTimeRange = pastTimeRangeDays * 24 * 3600\n            sinceTimestamp = (time.time() - pastTimeRange) * 1000\n            Orders = self.API.fetch_my_trades(Market, since=sinceTimestamp)\n\n            userOrderHistory[Market] = Orders\n\n        return userOrderHistory\n\n    def getPriceHistory(self):\n        candlestickData = {}\n        for Market in self.getAssets():\n            candlestickData[Market] = self.API.fetch_ohlcv(Market)\n\n        return candlestickData\n"
  },
  {
    "path": "livetrader/gekkoChecker.py",
    "content": "#!/bin/python\n\nfrom . import gekkoTrigger\n\ntry:\n    from . import assetAllocator\nexcept Exception:\n    pass\n\nfrom dateutil import parser as dateparser\nimport datetime\nimport csv\nimport re\nimport random\nfrom subprocess import Popen, PIPE\n\nimport pytoml\nimport os\nimport time\nimport json\n\n\ndef calculateMostIndicatedAssets(exchange):\n    candlestickData = exchange.getPriceHistory()\n    Assets = assetAllocator.selectMostProbableAssets(candlestickData)\n    Assets = [{'EXCHANGE': exchange.name,\n               'ASSET': a.split('/')[0],\n               'CURRENCY': a.split('/')[1]} for a in Assets]\n    return Assets\n\n\ndef stopGekkoBots():\n    PS = ['ps', 'aux']\n\n    runningProcs = Popen(PS,\n                         stdout=PIPE, stderr=PIPE)\n    runningProcs = runningProcs.stdout.read().decode('utf-8').split('\\n')\n    killPIDs = []\n\n    for proc in runningProcs:\n        if 'gekko/core' in proc:\n            PID = re.findall(\"\\d\\d\\d+\", proc)[0]\n            killPIDs.append(PID)\n\n    print(killPIDs)\n\n    for PID in killPIDs:\n        N = Popen(['kill', '-9', PID], stdout=PIPE)\n        N.communicate()\n\n\ndef interpreteRunningBotStatistics(runningBots):\n    allBotStrategies = []\n    runningTimes = []\n\n    for B in runningBots.keys():\n        Bot = runningBots[B]\n\n        if Bot[\"config\"][\"type\"] == 'tradebot':\n            botCurrentStrategy = Bot[\"config\"][\"tradingAdvisor\"][\"method\"]\n            allBotStrategies.append(botCurrentStrategy)\n\n        elif Bot[\"config\"][\"type\"] == 'market watcher':\n            fC = dateparser.parse(Bot[\"events\"][\"initial\"][\"candle\"][\"start\"])\n            lC = dateparser.parse(Bot[\"events\"][\"latest\"][\"candle\"][\"start\"])\n            delta = (lC - fC).seconds\n\n            runningTime = delta\n            runningTimes.append(runningTime)\n\n        else:\n            print(\"Odd runningBot found:\")\n            print(json.dumps(Bot, 
indent=2))\n\n    return runningTimes, allBotStrategies\n\n\ndef getParameterSettingsPath(parameterName):\n    N = os.path.join('strategy_parameters',\n                     parameterName) + '.toml'\n    return N\n\n\ndef operateStrategyScores(exchange, ranker,\n                          Balances, runningTimeHours,\n                          currentPortfolioStatistics, runningBotStrategies):\n    print(\"Rebooting gekko trading bots.\")\n\n    markzeroTime = datetime.timedelta(minutes=runningTimeHours*3600)\n    predictedStartTime = datetime.datetime.now() - markzeroTime\n    # APPLY LAST SCORE TO STRATEGIES;\n    ranker.loadStrategyRankings()\n\n    def makeBalanceScore(entry):\n        return (float(entry['BALANCE']) /\n                float(entry['AVERAGE_PRICE']))\n\n    pastCorrespondingScore = None\n    for row in Balances:\n        balanceDate = dateparser.parse(row['TIME'])\n        timeDelta = predictedStartTime - balanceDate\n        minuteDelta = abs(timeDelta.seconds) / 60\n        if minuteDelta < 60:\n            pastCorrespondingScore = makeBalanceScore(row)\n\n    if pastCorrespondingScore is not None:\n        currentScore =\\\n            makeBalanceScore(currentPortfolioStatistics)\n\n        botRunScore = currentScore / pastCorrespondingScore * 100\n        normalizedBotRunScore = botRunScore / runningTimeHours\n\n        runningStrategy = None\n        for Strategy in ranker.Strategies:\n            equalStrats = True\n            strategyParameters = pytoml.load(open(\n                getParameterSettingsPath(Strategy.parameters)))\n            print(runningBotStrategies[-1])\n            comparateParameters =\\\n                runningBotStrategies[-1]['params']\n            for param in comparateParameters.keys():\n                if type(param) == dict:\n                    continue\n                if param not in strategyParameters.keys():\n                    equalStrats = False\n                    break\n                if 
strategyParameters[param] !=\\\n                   comparateParameters[param]:\n                    equalStrats = False\n                    break\n            if equalStrats:\n                runningStrategy = Strategy\n                break\n\n        if runningStrategy:\n            print(\"Runnnig strategy found at scoreboard.\")\n            runningStrategy.profits.append(normalizedBotRunScore)\n        else:\n            print(\"Running strategy not found at scoreboard.\")\n\n    # WRITE NEW STRATEGY SCORES;\n    ranker.saveStrategyRankings()\n\n\ndef checkGekkoRunningBots(exchange, ranker, options):\n    runningBots = gekkoTrigger.getRunningGekkos()\n\n    BalancesFields = ['TIME', 'BALANCE', 'AVERAGE_PRICE']\n\n    selectorSigma = exchange.conf.strategySelectorSigma\n    allPairs = exchange.getAssets()\n    assetCurrencyPairs = exchange.parseAssets(allPairs)\n\n    try:\n        Balances = csv.DictReader(open('balances.csv'))\n    except FileNotFoundError:\n        print(\"Balances file not found.\")\n        Balances = []\n\n    Balances = [row for row in Balances]\n    wBalances = csv.DictWriter(open('balances.csv', 'w'),\n                               fieldnames=BalancesFields)\n    wBalances.writeheader()\n    for N in Balances:\n        wBalances.writerow(N)\n\n    currentPortfolioValue = exchange.getUserBalance()\n    print(\"Net weight %.2f USD\" % currentPortfolioValue)\n\n    currentPortfolioStatistics = {\n        'TIME': str(datetime.datetime.now()),\n        'BALANCE': currentPortfolioValue,\n        'AVERAGE_PRICE': exchange.getAveragePrices()\n    }\n\n    wBalances.writerow(currentPortfolioStatistics)\n\n    if runningBots:\n        runningTimes, runningBotStrategies =\\\n            interpreteRunningBotStatistics(runningBots)\n\n        if runningTimes and runningBotStrategies:\n            averageRunningTime = sum(runningTimes) / len(runningTimes)\n            runningTimeHours = averageRunningTime / 3600\n\n            
targetMinimumRunningHours =\\\n                exchange.conf.strategyRunTimePeriodHours\n\n            # if target running time is reached;\n            if runningTimeHours > targetMinimumRunningHours:\n                operateStrategyScores(exchange, ranker,\n                                      Balances, runningTimeHours,\n                                      currentPortfolioStatistics,\n                                      runningBotStrategies)\n\n                Strategy = ranker.selectStrategyToRun(selectorSigma)\n\n                stopGekkoBots()\n                time.sleep(60)\n\n                selectedAssetCurrencyPairs = calculateMostIndicatedAssets(exchange)\n                gekkoTrigger.launchBatchTradingBots(\n                    selectedAssetCurrencyPairs,\n                    [Strategy.strategy],\n                    options\n                )\n\n            else:\n                print(\"Target runtime not reached.\")\n    else:\n        ranker.loadStrategyRankings()\n        print(\"Launching bots on idle gekko instance.\")\n        Strategy = ranker.selectStrategyToRun(selectorSigma)\n        selectedAssetCurrencyPairs = calculateMostIndicatedAssets(exchange)\n        print(assetCurrencyPairs)\n        print(selectedAssetCurrencyPairs)\n        gekkoTrigger.launchBatchTradingBots(\n            selectedAssetCurrencyPairs,\n            [Strategy.strategy],\n            options\n        )\n"
  },
  {
    "path": "livetrader/gekkoTrigger.py",
    "content": "#!/bin/python\nimport time\nfrom evaluation.gekko.API import httpPost\nfrom evaluation.gekko.dataset import epochToString\nimport requests\nimport json\nfrom promoterz import TOMLutils\n\n\ndef runTradingBot(botSpecifications, Strategy, options, TradingBot=False):\n    URL = \"http://localhost:3000/api/startGekko\"\n\n    if not Strategy:\n        Strategy = botSpecifications['STRATEGY']\n\n    print(\"Starting bot running %s for %s/%s at %s.\" % (\n        Strategy,\n        botSpecifications['ASSET'],\n        botSpecifications['CURRENCY'],\n        botSpecifications['EXCHANGE']))\n\n    traderParameters = {\n        \"tradingAdvisor\": {\n            \"enabled\": 'true',\n            \"method\": Strategy,\n            \"candleSize\": options.candleSize,\n            \"historySize\": 40\n        }\n    }\n\n    watchSettings = getWatchSettings(botSpecifications)\n    traderParameters.update(getTraderBaseParameters())\n    traderParameters.update(watchSettings)\n\n    if TradingBot:\n        traderParameters['type'] = \"tradebot\"\n        traderParameters['trader'] = {'enabled': 'true'}\n    else:\n        traderParameters['type'] = \"paper trader\"\n        traderParameters['paperTrader'] = {\n            \"feeMaker\": 0.25,\n            \"feeTaker\": 0.25,\n            \"feeUsing\": \"maker\",\n            \"slippage\": 0.05,\n            \"simulationBalance\": {\n                \"asset\": 0,\n                \"currency\": 100\n            },\n            \"reportRoundtrips\": 'true',\n            \"enabled\": 'true'\n        }\n\n    commonPath = 'strategy_parameters/%s.toml'\n    if options.alternativeParameters:\n        parameterPath = commonPath % options.alternativeParameters\n    else:\n        parameterPath = commonPath % Strategy\n\n    strategySettings = TOMLutils.preprocessTOMLFile(\n        parameterPath)\n    strategySettings = TOMLutils.TOMLToParameters(strategySettings)\n    traderParameters[Strategy] = strategySettings\n\n    
watcherSettings = getWatcherBaseParameters()\n    watcherSettings.update(watchSettings)\n\n    ExistingWatcher = checkWatcherExists(watchSettings)\n    if not ExistingWatcher:\n        print(\"Creating watcher for %s!\" %\n              watchSettings['watch']['exchange'])\n        Watcher = httpPost(URL, watcherSettings)\n        time.sleep(4)\n    else:\n        print(\"Watcher for %s-%s exists! Creating none.\" %\n              (watchSettings['watch']['exchange'],\n              watchSettings['watch']['asset']))\n        Watcher = None\n        traderParameters\n\n    Trader = httpPost(URL, traderParameters)\n\n    return Watcher, Trader\n\n\ndef getTraderBaseParameters():\n    Request = {\n        \"market\": {\n            \"type\": \"leech\",\n            \"from\": epochToString(time.time())\n        },\n        \"mode\": \"realtime\",\n        \"adviceWriter\" : {\n            \"enabled\": 'false',\n            \"muteSoft\": 'false'\n            },\n        \"adviceLogger\": {\n            \"enabled\": 'false',\n            \"muteSoft\": 'false'\n            },\n\n        \"candleWriter\": {\n            \"enabled\": 'false',\n            \"adapter\": \"sqlite\"\n        },\n        \"type\": \"paper trader\",\n        \"performanceAnalyzer\": {\n            \"riskFreeReturn\": 2,\n            \"enabled\": 'false'\n        },\n        \"valid\": 'true'\n    }\n    return Request\n\n\ndef getWatchSettings(coinInfo):\n    W = {\n        \"watch\": {\n            \"exchange\": coinInfo[\"EXCHANGE\"],\n            \"currency\": coinInfo[\"CURRENCY\"].upper(),\n            \"asset\": coinInfo[\"ASSET\"].upper()\n        }\n    }\n    return W\n\n\ndef checkWatcherExists(Watch):\n    gekkoInstances = getRunningGekkos()\n    Watch = Watch['watch']\n    checkKeys = ['asset', 'currency', 'exchange']\n    for instanceName in gekkoInstances.keys():\n        instance = gekkoInstances[instanceName]\n        if instance['type'] == 'watcher':\n            FOUND = True\n     
       watcherTargetAssetCurrency = instance['config']['watch']\n            for C in checkKeys:\n                if watcherTargetAssetCurrency[C] != Watch[C]:\n                    FOUND = False\n                    break\n            if FOUND:\n                return instance['id']\n\n    return False\n\n\ndef getRunningGekkos():\n    try:\n        W = requests.get('http://localhost:3000/api/gekkos')\n    except requests.exceptions.ConnectionError:\n        print(\"Gekko is not running.\")\n        return {}\n    runningGekkos = json.loads(W.text)['live']\n    return runningGekkos\n\n\ndef getWatcherBaseParameters():\n    Request = {\n        \"candleWriter\": {\n            \"enabled\": \"false\",\n            \"adapter\": \"sqlite\"\n        },\n        \"type\": \"market watcher\",\n        \"mode\": \"realtime\"\n    }\n    return Request\n\n\ndef launchBatchTradingBots(assetCurrencyPairs, Stratlist, options):\n    for assetCurrencyPair in assetCurrencyPairs:\n        for Strategy in Stratlist:\n            w, t = runTradingBot(assetCurrencyPair, Strategy,\n                                 options, TradingBot=True)\n\n\n"
  },
  {
    "path": "livetrader/japonicusResultSelector.py",
    "content": "#!/bin/python\nimport os\nimport csv\nimport shutil\nimport names\n\nfrom . import exchangeMonitor\n\n\ndef readResultFolder(strategyName, runLogFolderPath, retrievalCount=1):\n    evalBreaksLogFilename = os.path.join(runLogFolderPath, 'evaluation_breaks.csv')\n    if not os.path.isfile(evalBreaksLogFilename):\n        print(\"Evaluation break log file not found.\")\n        return False\n\n    evalBreakLogs = open(evalBreaksLogFilename)\n\n    evalBreakLogs = csv.DictReader(evalBreakLogs)\n\n    positiveResults = []\n    for result in evalBreakLogs:\n        if result['evaluation'] > 0 and result['secondary'] > 0:\n            if len(list(result.keys())) > 2:\n                result['score'] = result['evaluation'] + result['secondary']\n                positiveResults.append(result)\n            else:\n                print(\"Naive logging system detected, from older japonicus version.\")\n                print(\"Unable to check result file.\")\n\n    if not positiveResults:\n        print(\"No positive results found!\")\n        return False\n\n    positiveResults = sorted(positiveResults,\n                             key=lambda r: r['score'], reverse=True)\n\n\n    parameterName = strategyName + names.get_full_name()\n\n    R = positiveResults[0]\n    stratPath = os.path.join(R['filepath'])\n    shutil.copy()\n\n    strategyRankings = exchangeMonitor.loadStrategyRankings()\n\n    newEntry = exchangeMonitor.strategyParameterSet(\n        {\n            'strategy': strategyName,\n            'parameters': parameterName,\n            'profits': []\n        }\n    )\n\n    strategyRankings.append(newEntry)\n\n    exchangeMonitor.saveStrategyRankings(strategyRankings)\n    \n    return True\n\n\ndef sweepLogFolder():\n    availableLogs = os.listdir('logs')\n    for folder in availableLogs:\n        print(folder)\n        strategyName = ''\n        readResult = readResultFolder(strategyName, folder)\n"
  },
  {
    "path": "livetrader/strategyRanker.py",
    "content": "#!/bin/python\nimport json\nimport pytoml\nimport random\n\n\nclass strategyRanker():\n    def __init__(self):\n        self.Strategies = []\n\n    def loadStrategyRankings(self):\n        W = json.load(open(\"gekkoStrategyRankings.json\"))\n        self.Strategies = []\n        for s in W:\n            S = strategyParameterSet(s)\n            self.Strategies.append(S)\n\n    def saveStrategyRankings(self):\n        outputList = []\n\n        for strategy in self.Strategies:\n            outputList.append(strategy.toJson())\n\n        json.dump(outputList, open(\"gekkoStrategyRankings.json\", 'w'))\n\n    def selectStrategyToRun(self, sigma=10):\n        # SELECT AND LAUNCH TRADING BOT BATCH WITH SELECTED STRATEGY;\n        if random.random() < sigma / 100:\n            Strategy = sorted(self.Strategies,\n                              key=lambda s: s.getScore(), reverse=True)[0]\n        else:\n            Strategy = random.choice(self.Strategies)\n\n        return Strategy\n\n\nclass strategyParameterSet():\n    def __init__(self, jsonData):\n        self.Attributes = ['strategy', 'parameters', 'profits']\n        self.fromJson(jsonData)\n\n    def fromJson(self, jsonData):\n        for Name in self.Attributes:\n            self.__dict__[Name] = jsonData[Name]\n\n    def toJson(self):\n        jsonData = {}\n        for Name in self.Attributes:\n            jsonData[Name] = self.__dict__[Name]\n        return jsonData\n\n    def loadParameterSet(self):\n        self.parameterSet = pytoml.load(open(self.parameters))\n\n    def getScore(self):\n        if self.profits:\n            return sum(self.profits) / len(self.profits)\n        else:\n            return 0\n"
  },
  {
    "path": "promoterz/README.md",
    "content": "A python module specialized on genetic algorithms using various representations. \nIntended to evolve a dict of parameters, nested or not, provided with respective ranges.\n\n```\nsampleParameters = {'short': (6,8),\n              'persist': (1,50),\n              'variableY': (2,6),\n              'ROP_weight': (5,7),\n              'santa': (1,10),\n              'thresholds': {\n                 'top': (8,11),\n                 'bottom': (17,32)\n              },\n              'IL12': (3,8)}\n```\n"
  },
  {
    "path": "promoterz/TOMLutils.py",
    "content": "#!/bin/python\nimport re\nimport pytoml\n\ndef preprocessTOMLFile(filepath):\n    f = open(filepath)\n    return f\n\n\ndef TOMLToParameters(TOMLDATA):\n    Parameters = pytoml.load(TOMLDATA)\n\n    for Parameter in Parameters.keys():\n        if type(Parameter) == str:\n            if '=' in Parameter:\n                Parameter = Parameter.replace('=', '')\n                Parameter = float(Parameter)\n            \n    return Parameters\n\n\ndef parametersToTOML(Settings):\n    Text = pytoml.dumps(Settings)\n    \n    return Text\n"
  },
  {
    "path": "promoterz/__init__.py",
    "content": "#!/bin/python\nfrom .import functions\n\nfrom .import supplement, validation, parameterOperations\nfrom .import evolutionHooks\nfrom .import world, locale\nfrom .import evaluationPool\nfrom .import logger\nfrom .import metaPromoterz\nfrom .import sequence\nfrom .import webServer\nfrom .import TOMLutils\nfrom .import evaluationBreak\n"
  },
  {
    "path": "promoterz/environment.py",
    "content": "#!/bin/python\n\n\nclass Environment():\n    def __init__(self, propertyGenenerator):\n        self.w = None\n"
  },
  {
    "path": "promoterz/evaluationBreak.py",
    "content": "#!/bin/python\nimport random\nimport json\nimport csv\nfrom deap import tools\n\n\nimport promoterz\nimport evaluation\nfrom . import TOMLutils\n\n\ndef showResults(World):\n    validationDatasets = []\n    # IS EVALUATION DATASET LOADED? USE IT;\n    if World.EnvironmentParameters['evaluation']:\n        useSecondary = 'evaluation'\n    else:\n        useSecondary = 'evolution'\n\n    # LOAD EVALUATION DATASET;\n    sourceDataset = random.choice(World.EnvironmentParameters[useSecondary])\n    getter = evaluation.gekko.datasetOperations.getRandomSectorOfDataset\n    for NB in range(World.conf.evalbreak.proofSize):\n        newDataset = getter(sourceDataset, World.conf.backtest.deltaDays)\n        validationDatasets.append(newDataset)\n\n    for LOCALE in World.locales:\n        LOCALE.population = [ind for ind in LOCALE.population\n                             if ind.fitness.valid]\n\n        # SELECT BEST INDIVIDUALS;\n        B = World.conf.evalbreak.NBBESTINDS\n        BestIndividues = tools.selBest(LOCALE.population, B)\n        Z = min(World.conf.evalbreak.NBADDITIONALINDS,\n                len(LOCALE.population) - B)\n        Z = max(0, Z)\n\n        # SELECT ADDITIONAL INDIVIDUALS;\n        AdditionalIndividues = promoterz.evolutionHooks.Tournament(\n            LOCALE.population, Z, Z * 2\n        )\n        print(\"%i selected;\" % len(AdditionalIndividues))\n        AdditionalIndividues = [\n            x for x in AdditionalIndividues if x not in BestIndividues\n        ]\n        setOfToEvaluateIndividues = BestIndividues + AdditionalIndividues\n        print(\"%i selected;\" % len(setOfToEvaluateIndividues))\n        print(\"Selecting %i+%i individues, random test;\" % (B, Z))\n\n        currentSessionBreakResults = []\n        # EVALAUTE EACH SELECTED INDIVIDUE;\n        for FinalIndividue in setOfToEvaluateIndividues:\n            GlobalLogEntry = {}\n            proof = stratSettingsProofOfViability\n            AssertFitness, 
FinalProfit, Results = proof(\n                World, FinalIndividue, validationDatasets\n            )\n            LOCALE.lastEvaluation = FinalProfit\n            GlobalLogEntry['evaluation'] = FinalProfit\n            World.logger.log(\n                \"\\n\\n\\nTesting Strategy of %s @ EPOCH %i:\\n\" % (\n                    LOCALE.name,\n                    LOCALE.EPOCH)\n            )\n\n            for R, Result in enumerate(Results):\n                World.logger.log(\n                    evaluation.gekko.showBacktestResult(Result,\n                                       validationDatasets[R]) + '\\n')\n\n            World.logger.log(\n                '\\nRelative profit on evolution dataset: %.3f' %\n                FinalProfit)\n            if AssertFitness or FinalProfit > 50:\n                World.logger.log(\"Current parameters are viable.\")\n            else:\n                World.logger.log(\"Current parameters fails.\")\n                if not World.conf.Global.showFailedStrategies:\n                    World.logger.log(\n                        \"Skipping further tests on current parameters.\",\n                        show=False)\n                    continue\n\n            FinalIndividueSettings = World.tools.constructPhenotype(\n                FinalIndividue)\n\n            # -- PREFETCH TOMLSettings;\n            TOMLSettings = TOMLutils.parametersToTOML(\n                FinalIndividueSettings\n            )\n\n            # --EVALUATION DATASET TEST AND REPORT;\n            if World.EnvironmentParameters['evaluation']:\n                evalDataset = random.choice(\n                    World.EnvironmentParameters['evaluation'])\n                evalDataset = getter(evalDataset, 0)\n                secondaryResults = World.parallel.evaluateBackend(\n                    [evalDataset], 0, [FinalIndividue]\n                )\n                print()\n                # print(secondaryResults)\n                backtestResult = 
secondaryResults[0][0]\n                World.logger.log(\n                    \"Relative profit on evaluation dataset: \\n\\t%s\" %\n                    evaluation.gekko.showBacktestResult(backtestResult))\n                LOCALE.lastEvaluationOnSecondary =\\\n                    backtestResult['relativeProfit']\n                GlobalLogEntry['secondary'] =\\\n                    backtestResult['relativeProfit']\n\n                currentSessionBreakResults.append((backtestResult['relativeProfit'],\n                                                   TOMLSettings))\n            else:\n                print(\"Evaluation dataset is disabled.\")\n\n            # LOG AND SHOW PARAMETERS;\n            Show = json.dumps(FinalIndividueSettings, indent=2)\n            print(\"~\" * 18)\n            World.logger.log(\" %.3f final profit ~~~~\" % FinalProfit)\n            print(\" -- Settings for Gekko config.js -- \")\n            World.logger.log(Show)\n            print(\" -- Settings for Gekko --ui webpage -- \")\n\n            World.logger.log(TOMLSettings)\n\n            paramsFilename = \"%s-EPOCH%i\" % (LOCALE.name,\n                                             LOCALE.EPOCH)\n            World.logger.saveParameters(paramsFilename, TOMLSettings)\n            GlobalLogEntry['filename'] = paramsFilename\n            print(\"\\nRemember to check MAX and MIN values for each parameter.\")\n            print(\"\\tresults may improve with extended ranges.\")\n            World.EvaluationStatistics.append(GlobalLogEntry)\n\n    # SAVE GLOBAL EVALUATION LOGS;\n    evaluationBreaksFilename = 'logs/evaluation_breaks.csv'\n\n    if World.EvaluationStatistics:\n        fieldnames = list(World.EvaluationStatistics[0].keys())\n        with open(evaluationBreaksFilename, 'w') as f:\n            GlobalEvolutionSummary = csv.DictWriter(f, fieldnames)\n            GlobalEvolutionSummary.writeheader()\n            World.logger.log('\\t'.join(GlobalEvolutionSummary.fieldnames),\n         
                    target=\"Summary\",\n                             show=False, replace=True)\n\n            for n in World.EvaluationStatistics:\n                GlobalEvolutionSummary.writerow(n)\n        with open(evaluationBreaksFilename) as f:\n            GlobalEvolutionSummary = csv.DictReader(f)\n            for row in GlobalEvolutionSummary:\n                World.logger.log('\\t'.join([row[x] for x in row.keys()]),\n                                 target=\"Summary\",\n                                 show=False, replace=False)\n\n    World.logger.updateFile()\n\n    # UPDATE WEB SERVER VISUALIZATION;\n    if World.web:\n        World.web.updateEvalBreakGraph(World.web, World.EvaluationStatistics)\n        World.web.resultParameters += currentSessionBreakResults\n\n\ndef stratSettingsProofOfViability(World, Individual, Datasets):\n    AllProofs = []\n    # Datasets = [[x] for x in Datasets]\n    Results = World.parallel.evaluateBackend(Datasets, 0, [Individual])\n    for W in Results[0]:\n        AllProofs.append(W['relativeProfit'])\n    testMoney = 0\n    for value in AllProofs:\n        testMoney += value\n    check = [x for x in AllProofs if x > 0]\n    Valid = sum(check) == len(AllProofs)\n    return Valid, testMoney, Results[0]\n"
  },
  {
    "path": "promoterz/evaluationPool.py",
    "content": "#!/bin/python\nimport time\nimport random\nimport itertools\n\nfrom multiprocessing import Pool, TimeoutError\nfrom multiprocessing.pool import ThreadPool\n\n\nclass EvaluationPool():\n\n    def __init__(self,\n                 World,\n                 Urls, poolsize, individual_info):\n        self.World = World\n\n        self.Urls = Urls\n        self.lasttimes = [0 for x in Urls]\n        self.lasttimesperind = [0 for x in Urls]\n        self.poolsizes = [poolsize for x in Urls]\n        self.individual_info = individual_info\n\n    def evaluateBackend(self, datasets, I, inds):\n        stime = time.time()\n        dateInds = list(itertools.product(datasets, inds))\n        # print(list(dateInds))\n        Q = [\n            ([dataset], Ind, self.Urls[I])\n            for dataset, Ind in dateInds\n        ]\n        P = Pool(self.poolsizes[I])\n        fitnesses = P.starmap(self.World.tools.Evaluate, Q)\n        P.close()\n        P.join()\n        delta_time = time.time() - stime\n        return fitnesses, delta_time\n\n    def evaluatePopulation(self, locale):\n        individues_to_simulate = [\n            ind for ind in locale.population if not ind.fitness.valid\n        ]\n        props = self.distributeIndividuals(individues_to_simulate)\n        args = [\n            [\n                locale.Dataset,\n                I,\n                props[I],\n            ]\n            for I in range(len(self.Urls))\n        ]\n        pool = ThreadPool(len(self.Urls))\n        results = []\n        try:\n            for A in args:\n                results.append(pool.apply_async(self.evaluateBackend, A))\n            pool.close()\n        except (SystemExit, KeyboardInterrupt):\n            print(\"Aborted by user.\")\n            exit(0)\n        TimedOut = []\n        for A in range(len(results)):\n            try:\n                perindTime = 3 * self.lasttimesperind[A]\\\n                             if self.lasttimesperind[A] else 12\n       
         timeout = perindTime * len(props[A])\\\n                    if A else None  # no timeout for local machine;\n                results[A] = results[A].get(timeout=timeout)\n            except TimeoutError:  # Timeout: remote machine is dead;\n                print(\"Machine timeouts!\")\n                args[A][1] = 0  # Set to evaluate @ local machine\n                results[A] = self.evaluateBackend(* args[A])\n                TimedOut.append(A)\n        pool.join()\n        TotalNumberOfTrades = 0\n        for PoolIndex in range(len(results)):\n            for i, fit in enumerate(results[PoolIndex][0]):\n                if self.individual_info:\n                    print(self.World.tools.showIndividue(fit))\n                self.World.tools.ApplyResult(fit, props[PoolIndex][i])\n\n                TotalNumberOfTrades += fit['trades']\n            self.lasttimes[PoolIndex] = results[PoolIndex][1]\n            L = len(props[PoolIndex])\n            self.lasttimesperind[PoolIndex] =\\\n                self.lasttimes[PoolIndex] / L if L else 5\n        F = [x.fitness.valid for x in individues_to_simulate]\n        assert (all(F))\n        for T in TimedOut:\n            self.ejectURL(T)\n        N = len(individues_to_simulate)\n        # RECORD NUMBER OF EVALUATIONS;\n        locale.World.totalEvaluations += N\n        # CALCULATE AVERAGE TRADE NUMBER;\n        averageTrades = TotalNumberOfTrades / max(1, N)\n        return N, averageTrades\n"
  },
  {
    "path": "promoterz/evolutionHooks.py",
    "content": "#!/bin/python\nfrom deap import base, tools\nfrom copy import deepcopy\n\nimport random\n\nimport promoterz.supplement.age\nimport promoterz.supplement.PRoFIGA\nimport promoterz.supplement.phenotypicDivergence\n\nimport itertools\n\n\n# population as last positional argument, to blend with toolbox;\ndef immigrateHoF(HallOfFame, population):\n    if not HallOfFame.items:\n        return population\n\n    for Q in range(1):\n        CHP = deepcopy(random.choice(HallOfFame))\n        del CHP.fitness.values\n        population += [CHP]\n    return population\n\n\ndef immigrateRandom(populate, nb_range, population):  # (populate function)\n    number = random.randint(*nb_range)\n    population += populate(number)\n    return population\n\n\ndef filterAwayWorst(population, N=5):\n    aliveSize = len(population) - 5\n    population = tools.selBest(population, aliveSize)\n    return population\n\n\ndef filterAwayThreshold(locale, Threshold, min_nb_inds):\n    thresholdFilter = lambda ind: ind.fitness.values[0] > Threshold\n    populationFilter(locale, thresholdFilter, min_nb_inds)\n\n\ndef filterAwayTradeCounts(locale, ThresholdRange, min_nb_inds):\n    def tradecountFilter(ind):\n        if ind.trades < ThresholdRange[0]:\n            return False\n        elif ind.trades > ThresholdRange[1]:\n            return False\n        else:\n            return True\n\n    populationFilter(locale, tradecountFilter, min_nb_inds)\n\n\ndef filterAwayRoundtripDuration(locale, ThresholdRange, min_nb_inds):\n    def roundtripDurationFilter(ind):\n        averageExposureHours = ind.averageExposure\n        if averageExposureHours < ThresholdRange[0]:\n            return False\n        elif averageExposureHours > ThresholdRange[1]:\n            return False\n        else:\n            return True\n\n    populationFilter(locale, roundtripDurationFilter, min_nb_inds)\n\n\ndef populationFilter(locale, filterFunction, min_nb_inds):\n\n    newPopulation = [\n        ind for ind 
in locale.population if filterFunction(ind)\n    ]\n    removed = [ind for ind in locale.population if ind not in newPopulation]\n\n    NBreturn = min(min_nb_inds - len(locale.population),\n                          min_nb_inds)\n    NBreturn = max(0, NBreturn)\n    if NBreturn and removed:\n        for k in range(NBreturn):\n            if removed:\n                newPopulation.append(removed.pop(random.randrange(0,\n                                                                  len(removed))))\n\n    locale.population = newPopulation\n\n\ndef evaluatePopulation(locale):\n    individues_to_simulate = [ind for ind in locale.population\n                              if not ind.fitness.valid]\n    fitnesses = locale.World.parallel.starmap(\n        locale.extratools.Evaluate, zip(individues_to_simulate)\n    )\n    for i, fit in zip(range(len(individues_to_simulate)), fitnesses):\n        individues_to_simulate[i].fitness.values = fit\n    return len(individues_to_simulate)\n\n\ndef getLocaleEvolutionToolbox(World, locale):\n    toolbox = base.Toolbox()\n    toolbox.register(\"ImmigrateHoF\", immigrateHoF, locale.HallOfFame)\n    toolbox.register(\"ImmigrateRandom\", immigrateRandom, World.tools.population)\n    toolbox.register(\"filterThreshold\", filterAwayThreshold, locale)\n    toolbox.register(\"filterTrades\", filterAwayTradeCounts, locale)\n    toolbox.register(\"filterExposure\", filterAwayRoundtripDuration, locale)\n    toolbox.register('ageZero', promoterz.supplement.age.ageZero)\n    toolbox.register(\n        'populationAges',\n        promoterz.supplement.age.populationAges,\n        World.conf.generation.ageBoundaries,\n    )\n    toolbox.register(\n        'populationPD',\n        promoterz.supplement.phenotypicDivergence.populationPhenotypicDivergence,\n        World.tools.constructPhenotype,\n    )\n    toolbox.register('evaluatePopulation', evaluatePopulation)\n    return toolbox\n\n\ndef getGlobalToolbox(representationModule):\n    # GLOBAL 
FUNCTION TO GET GLOBAL TBX UNDER DEVELOPMENT;\n    toolbox = base.Toolbox()\n    creator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\n    creator.create(\n        \"Individual\",\n        list,\n        fitness=creator.FitnessMax,\n        PromoterMap=None,\n        Strategy=genconf.Strategy,\n    )\n    toolbox.register(\"mate\", representationModule.crossover)\n    toolbox.register(\"mutate\", representationModule.mutate)\n    PromoterMap = initPromoterMap(Attributes)\n    toolbox.register(\"newind\", initInd, creator.Individual, PromoterMap)\n    toolbox.register(\"population\", tools.initRepeat, list, toolbox.newind)\n    toolbox.register(\"constructPhenotype\", representationModule.constructPhenotype)\n    return toolbox\n\n\ndef getFitness(individual):\n    R = sum(individual.wvalues)\n\n\ndef selectCriteria(ind):\n    return sum(ind.fitness.wvalues)\n\n\ndef selBest(individuals, number):\n    chosen = sorted(individuals, key=selectCriteria, reverse=True)\n    return chosen[:number]\n\n\ndef Tournament(individuals, finalselect, tournsize):\n    chosen = []\n    for i in range(finalselect):\n        aspirants = tools.selRandom(individuals, tournsize)\n        chosen.append(max(individuals, key=selectCriteria))\n    return chosen\n"
  },
  {
    "path": "promoterz/evolutionToolbox.py",
    "content": "#!/bin/python\nfrom deap import base\n\n\ndef getExtraTools(HallOfFame, W):\n    T = base.Toolbox()\n    T.register('q')\n"
  },
  {
    "path": "promoterz/functions.py",
    "content": "#!/bin/python\nimport random\nfrom deap import base\nfrom deap import creator\nfrom deap import tools\nfrom copy import deepcopy\n\nimport importlib\n\n\ndef PrepareAndEvaluate(constructPhenotype, evaluationMethod, Individual):\n    phenotype = constructPhenotype(Individual)\n    return evaluationMethod(phenotype)\n\n\ndef selectRepresentationMethod(methodname):\n    M = importlib.import_module(\"promoterz.representation.%s\" % methodname)\n    return M\n"
  },
  {
    "path": "promoterz/locale.py",
    "content": "#!/bin/python\nfrom deap import tools\n\nfrom . import evolutionHooks\nfrom . import statistics\n\n\nclass Locale():\n\n    def __init__(self, World, name, position, loop):\n        self.World = World\n        self.name = name\n        self.EPOCH = 0\n        self.position = position\n        self.EvolutionStatistics = []\n        self.HallOfFame = tools.HallOfFame(30)\n        self.extratools = evolutionHooks.getLocaleEvolutionToolbox(\n            World, self\n        )\n        # GENERATION METHOD SELECTION;\n        # to easily employ various GA algorithms,\n        # this base EPOCH processor loads a GenerationMethod file,\n        # which should contain a genToolbox function to generate\n        # fully working DEAP toolbox, and a reconstructTradeSettings\n        # function to convert parameters from individue to usable strategy Settings;\n        # Check promoterz/representation;\n        #genconf.Strategy = Strategy # ovrride strat defined on settings if needed;\n\n        # --initial population\n        self.population = World.tools.population(World.conf.generation.POP_SIZE)\n        self.lastEvaluation = None\n        self.lastEvaluationOnSecondary = None\n\n        # --INIT STATISTICS;\n        self.stats = statistics.getStatisticsMeter()\n        self.InitialBestScores, self.FinalBestScores = [], []\n        self.POP_SIZE = World.conf.generation.POP_SIZE\n        self.loop = loop\n\n    def run(self):\n        print(self.name)\n        self.loop(self.World, self)\n        self.EPOCH += 1\n"
  },
  {
    "path": "promoterz/logAnalysis.py",
    "content": ""
  },
  {
    "path": "promoterz/logger.py",
    "content": "#!/bin/python\nimport datetime\nimport os\nimport csv\n\n\nclass Logger():\n    def __init__(self, logfilename):\n        date = datetime.datetime.now()\n        if not os.path.isdir('logs'):\n            os.mkdir('logs')\n\n        self.logfilename = logfilename\n        self.Header = \"\"\n        self.Summary = \"\"\n        self.Body = \"\"\n        self.Online = False\n\n    def log(self, message, target=\"Body\", show=True, replace=False):\n        if target == \"Body\":\n            # now the log has value to be written.\n            if not self.Online:\n                os.mkdir('logs/%s' % self.logfilename)\n                os.mkdir('logs/%s/results' % self.logfilename)\n            self.Online = True\n\n        if replace:\n            self.__dict__[target] = message\n        else:\n            self.__dict__[target] += message + '\\n'\n        if show:\n            print(message)\n\n    def updateFile(self):\n        if not self.Online:\n            return\n        File = open('logs/%s/japonicus.log' % self.logfilename, 'w')\n        for segment in [self.Header, self.Summary, self.Body]:\n            File.write(segment + '\\n')\n\n        File.close()\n\n    def write_evolution_logs(self, i, stats, localeName):\n        filename = \"logs/%s/%s.csv\" % (self.logfilename, localeName)\n        if stats:\n            fieldnames = list(stats[0].keys())\n            with open(filename, 'w') as f:\n                df = csv.DictWriter(f, fieldnames)\n                df.writeheader()\n                df.writerows(stats)\n\n    def saveParameters(self, filename, content):\n        filename = \"logs/%s/results/%s.toml\" % (self.logfilename, filename)\n        File = open(filename, 'w')\n        File.write(content)\n        File.close()\n"
  },
  {
    "path": "promoterz/metaPromoterz.py",
    "content": "#!/bin/python\n\n# this file contains functions for 'meta genetic algorithm',\n# this acts to allow settings value manipulation via command line,\n# making possible a simple GA of GAs under bash.\n\n# TBD\nfrom .parameterOperations import flattenParameters, expandNestedParameters\n\n\ndef generateCommandLineArguments(parser, settings):\n    flatSettings = flattenParameters(settings)\n    for Setting in flatSettings.keys():\n        if type(flatSettings[Setting])  in [list, bool, tuple]:\n            pass\n        else:\n            originalValue = flatSettings[Setting]\n            parameterType = type(originalValue)\n            if parameterType.__name__ == 'NoneType':\n                parameterType = str\n            parser.add_option(\"--%s\" % Setting,\n                              dest=Setting,\n                              type=parameterType.__name__,\n                              default=originalValue)\n\n    return parser\n\n\ndef applyCommandLineOptionsToSettings(options, settings):\n    flatSettings = flattenParameters(settings)\n\n    for Setting in flatSettings.keys():\n        if Setting in options.__dict__.keys():\n            flatSettings[Setting] = options.__dict__[Setting]\n\n    Settings = expandNestedParameters(flatSettings)\n    return Settings\n"
  },
  {
    "path": "promoterz/parameterOperations.py",
    "content": "\n\n#!/bin/python\ndef flattenParameters(Parameters):\n    result = {}\n\n    def iter(D, path= []):\n        for q in D.keys():\n            if type(D[q]) == dict:\n                iter(D[q], path + [q])\n            else:\n                path_keyname = \".\".join(path + [q])\n                result.update({path_keyname: D[q]})\n\n    iter(Parameters)\n    return result\n\n\ndef expandNestedParameters(Parameters):\n    _Parameters = {}\n    for K in Parameters.keys():\n        if '.' in K:\n            Q = K.split('.')\n            cursor = 0\n            base = _Parameters\n            while cursor < len(Q) - 1:\n                if not Q[cursor] in base.keys():\n                    base[Q[cursor]] = {}\n                base = base[Q[cursor]]\n                cursor += 1\n            base[Q[cursor]] = Parameters[K]\n        else:\n            _Parameters[K] = Parameters[K]\n    return _Parameters\n\n\ndef parameterValuesToRangeOfValues(TargetParameters, Spread):\n    for parameter in TargetParameters.keys():\n        P = TargetParameters[parameter]\n        if type(P) not in [tuple, list]:\n            spread_change = Spread * P / 200\n            if P < 0:\n                spread_change = -spread_change\n            TargetParameters[parameter] = (P - spread_change, P + spread_change)\n    return TargetParameters\n"
  },
  {
    "path": "promoterz/representation/Creator.py",
    "content": "#!/bin/python\nfrom .import deapCreator as creator\nfrom deap import base\n\n\ndef init(fitness, extraParameters):\n    creator.create(\"FitnessMax\", fitness, weights=(1.0, 1))\n    creator.create(\"Individual\", list, fitness=creator.FitnessMax, **extraParameters)\n    return creator\n"
  },
  {
    "path": "promoterz/representation/chromosome.py",
    "content": "#!/bin/python\nfrom deap import base\nfrom deap import tools\n\nfrom copy import deepcopy\nimport random\n\nfrom . .import parameterOperations\n\nfrom .import Creator\n\ngetPromoterFromMap = lambda x: [x[z] for z in list(x.keys())]\n\n\ndef constructPhenotype(stratSettings, chrconf, Individue):\n    Settings = {}\n    GeneSize = 2\n    R = lambda V, lim: (lim[1] - lim[0]) * V / (33 * chrconf['GeneSize']) + lim[0]\n    PromotersPath = {v: k for k, v in Individue.PromoterMap.items()}\n    # print(PromotersPath)\n    #print(Individue[:])\n    Promoters = list(PromotersPath.keys())\n    for C in Individue:\n        for BP in range(len(C)):\n            if C[BP] in Promoters:\n                read_window = C[BP + 1: BP + 1 + GeneSize]\n                read_window = [V for V in read_window if type(V) == int and V < 33]\n                Value = sum(read_window)\n                ParameterName = PromotersPath[C[BP]]\n                Value = R(Value, stratSettings[ParameterName])\n                Settings[ParameterName] = Value\n    _Settings = parameterOperations.expandNestedParameters(Settings)\n    return _Settings\n\n\ndef getToolbox(Strategy, genconf, Attributes):\n    toolbox = base.Toolbox()\n    creator = Creator.init(base.Fitness, {'promoterMap': None, 'Strategy': Strategy})\n    # creator.create(\"FitnessMax\", base.Fitness, weights=(1.0, 3))\n    toolbox.register(\"mate\", pachytene)\n    toolbox.register(\"mutate\", mutate)\n    PromoterMap = initPromoterMap(Attributes)\n    toolbox.register(\n        \"newind\", initInd, creator.Individual, PromoterMap, genconf.chromosome\n    )\n    toolbox.register(\"population\", tools.initRepeat, list, toolbox.newind)\n    toolbox.register(\n        \"constructPhenotype\", constructPhenotype, Attributes, genconf.chromosome\n    )\n    return toolbox\n\n\ndef initPromoterMap(ParameterRanges):\n    PRK = list(ParameterRanges.keys())\n    Promoters = [x for x in PRK]\n    space = list(range(120, 240))\n    
random.shuffle(space)\n    PromoterValues = [space.pop() for x in Promoters]\n    PromoterMap = dict(zip(Promoters, PromoterValues))\n    # print(ParameterRanges)\n    assert (len(PRK) == len(list(PromoterMap.keys())))\n    return PromoterMap\n\n\ndef initChromosomes(PromoterMap, chrconf):\n    Promoters = getPromoterFromMap(PromoterMap)\n    PromoterPerChr = round(len(Promoters) / chrconf['Density']) + 1\n    _promoters = deepcopy(Promoters)\n    Chromosomes = [[] for k in range(PromoterPerChr)]\n    while _promoters:\n        for c in range(len(Chromosomes)):\n            if random.random() < 0.3:\n                if _promoters:\n                    promoter = _promoters.pop(random.randrange(0, len(_promoters)))\n                    Chromosomes[c].append(promoter)\n            for G in range(chrconf['GeneSize']):\n                Chromosomes[c].append(random.randrange(0, 33))\n    return Chromosomes\n\n\ndef initInd(Individual, PromoterMap, chrconf):\n    i = Individual()\n    i[:] = initChromosomes(PromoterMap, chrconf)\n    i.PromoterMap = PromoterMap\n    return i\n\n\ndef generateUID():\n    Chars = string.ascii_uppercase + string.digits\n    UID = ''.join(random.choices(Chars), k=6)\n    return UID\n\n\ndef chromossomeCrossover(chr1, chr2):\n    if len(chr1) != len(chr2):\n        top_bottom = 1 if random.random() < 0.5 else -1\n        len_diff = abs(len(chr1) - len(chr2))\n    else:\n        top_bottom = 1\n        len_diff = 0\n    offset = random.randrange(0, len_diff + 1)\n    minor = chr1 if len(chr1) < len(chr2) else chr2\n    major = chr2 if len(chr1) < len(chr2) else chr1\n    cut_point = random.randrange(0, len(minor))\n    for k in range(cut_point, len(minor)):\n        Buffer = major[k + offset]\n        major[k + offset] = minor[k]\n        minor[k] = Buffer\n\n\ndef pachytene(ind1, ind2):\n    if len(ind1) != len(ind2):\n        return\n\n    ind1 = deepcopy(ind1)\n    ind2 = deepcopy(ind2)\n    ind1[:] = sorted(ind1, key=len)\n    ind2[:] = 
sorted(ind2, key=len)\n    childChr = []\n    for W in range(len(ind1)):\n        chromossomeCrossover(ind1[W], ind2[W])\n        childChr.append(random.choice([ind1[W], ind2[W]]))\n    return ind1, ind2\n\n\ndef mutate(ind, mutpb=0.001, mutagg=12):\n    for C in range(len(ind)):\n        for BP in range(len(ind[C])):\n            if BP < 100:  # case BP is common base value;\n                if random.random() < mutpb:\n                    ind[C][BP] += random.choice(range(-mutagg, mutagg))\n            else:  # case BP is in fact a promoter;\n                pass\n    return ind,\n\n\ndef clone(Chr):  #!!review this\n    cut_point = random.randrange(- len(Chr), len(Chr))\n    if not cut_point:\n        cut_point = 1\n    if cut_point > 0:\n        new_chr = chr[:cut_point]\n    if cut_point < 0:\n        new_chr = chr[cut_point:]\n    Chr += new + Chr\n"
  },
  {
    "path": "promoterz/representation/deapCreator.py",
    "content": "#    This file is part of DEAP.\n#\n#    DEAP is free software: you can redistribute it and/or modify\n#    it under the terms of the GNU Lesser General Public License as\n#    published by the Free Software Foundation, either version 3 of\n#    the License, or (at your option) any later version.\n#\n#    DEAP is distributed in the hope that it will be useful,\n#    but WITHOUT ANY WARRANTY; without even the implied warranty of\n#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#    GNU Lesser General Public License for more details.\n#\n#    You should have received a copy of the GNU Lesser General Public\n#    License along with DEAP. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"The :mod:`~deap.creator` is a meta-factory allowing to create classes that\nwill fulfill the needs of your evolutionary algorithms. In effect, new\nclasses can be built from any imaginable type, from :class:`list` to\n:class:`set`, :class:`dict`, :class:`~deap.gp.PrimitiveTree` and more,\nproviding the possibility to implement genetic algorithms, genetic\nprogramming, evolution strategies, particle swarm optimizers, and many more.\n\"\"\"\n\nimport array\nimport copy\nimport warnings\nimport copyreg as copy_reg\n\nclass_replacers = {}\n\"\"\"Some classes in Python's standard library as well as third party library\nmay be in part incompatible with the logic used in DEAP. 
To palliate\nthis problem, the method :func:`create` uses the dictionary\n`class_replacers` to identify if the base type provided is problematic, and if\nso  the new class inherits from the replacement class instead of the\noriginal base class.\n\n`class_replacers` keys are classes to be replaced and the values are the\nreplacing classes.\n\"\"\"\n\ntry:\n    import numpy\n    (numpy.ndarray, numpy.array)\nexcept ImportError:\n    # Numpy is not present, skip the definition of the replacement class.\n    pass\nexcept AttributeError:\n    # Numpy is present, but there is either no ndarray or array in numpy,\n    # also skip the definition of the replacement class.\n    pass\nelse:\n    class _numpy_array(numpy.ndarray):\n        def __deepcopy__(self, memo):\n            \"\"\"Overrides the deepcopy from numpy.ndarray that does not copy\n            the object's attributes. This one will deepcopy the array and its\n            :attr:`__dict__` attribute.\n            \"\"\"\n            copy_ = numpy.ndarray.copy(self)\n            copy_.__dict__.update(copy.deepcopy(self.__dict__, memo))\n            return copy_\n\n        @staticmethod\n        def __new__(cls, iterable):\n            \"\"\"Creates a new instance of a numpy.ndarray from a function call.\n            Adds the possibility to instanciate from an iterable.\"\"\"\n            return numpy.array(list(iterable)).view(cls)\n            \n        def __setstate__(self, state):\n            self.__dict__.update(state)\n\n        def __reduce__(self):\n            return (self.__class__, (list(self),), self.__dict__)\n\n    class_replacers[numpy.ndarray] = _numpy_array\n\nclass _array(array.array):\n    @staticmethod\n    def __new__(cls, seq=()):\n        return super(_array, cls).__new__(cls, cls.typecode, seq)\n    \n    def __deepcopy__(self, memo):\n        \"\"\"Overrides the deepcopy from array.array that does not copy\n        the object's attributes and class type.\n        \"\"\"\n        cls = 
self.__class__\n        copy_ = cls.__new__(cls, self)\n        memo[id(self)] = copy_\n        copy_.__dict__.update(copy.deepcopy(self.__dict__, memo))\n        return copy_\n\n    def __reduce__(self):\n        return (self.__class__, (list(self),), self.__dict__)\nclass_replacers[array.array] = _array\n\nclass CreatorMeta(type):\n    def __new__(meta, name, base, dct):\n        return super(CreatorMeta, meta).__new__(meta, name, (base,), dct)\n\n    def __init__(cls, name, base, dct):\n        # A DeprecationWarning is raised when the object inherits from the \n        # class \"object\" which leave the option of passing arguments, but\n        # raise a warning stating that it will eventually stop permitting\n        # this option. Usually this happens when the base class does not\n        # override the __init__ method from object.\n        dict_inst = {}\n        dict_cls = {}\n        for obj_name, obj in dct.items():\n            if isinstance(obj, type):\n                dict_inst[obj_name] = obj\n            else:\n                dict_cls[obj_name] = obj\n        def initType(self, *args, **kargs):\n            \"\"\"Replace the __init__ function of the new type, in order to\n            add attributes that were defined with **kargs to the instance.\n            \"\"\"\n            for obj_name, obj in dict_inst.items():\n                setattr(self, obj_name, obj())\n            if base.__init__ is not object.__init__:\n                base.__init__(self, *args, **kargs)\n\n        cls.__init__ = initType\n        cls.reduce_args = (name, base, dct)\n        super(CreatorMeta, cls).__init__(name, (base,), dict_cls)\n\n    def __reduce__(cls):\n        return (meta_creator, cls.reduce_args)\n\ncopy_reg.pickle(CreatorMeta, CreatorMeta.__reduce__)\n\ndef meta_creator(name, base, dct):\n    class_ = CreatorMeta(name, base, dct)\n    globals()[name] = class_\n    return class_\n\ndef create(name, base, **kargs):\n    \"\"\"Creates a new class named *name* 
inheriting from *base* in the\n    :mod:`~deap.creator` module. The new class can have attributes defined by\n    the subsequent keyword arguments passed to the function create. If the\n    argument is a class (without the parenthesis), the __init__ function is\n    called in the initialization of an instance of the new object and the\n    returned instance is added as an attribute of the class' instance.\n    Otherwise, if the argument is not a class, (for example an :class:`int`),\n    it is added as a \"static\" attribute of the class.\n    \n    :param name: The name of the class to create.\n    :param base: A base class from which to inherit.\n    :param attribute: One or more attributes to add on instanciation of this\n                      class, optional.\n    \n    The following is used to create a class :class:`Foo` inheriting from the\n    standard :class:`list` and having an attribute :attr:`bar` being an empty\n    dictionary and a static attribute :attr:`spam` initialized to 1. ::\n    \n        create(\"Foo\", list, bar=dict, spam=1)\n        \n    This above line is exactly the same as defining in the :mod:`creator`\n    module something like the following. ::\n    \n        class Foo(list):\n            spam = 1\n            \n            def __init__(self):\n                self.bar = dict()\n\n    The :ref:`creating-types` tutorial gives more examples of the creator\n    usage.\n    \"\"\"\n\n    if name in globals():\n        warnings.warn(\"A class named '{0}' has already been created and it \"\n                      \"will be overwritten. Consider deleting previous \"\n                      \"creation of that class or rename it.\".format(name),\n                      RuntimeWarning)\n\n    # Check if the base class has to be replaced\n    if base in class_replacers:\n        base = class_replacers[base]\n    meta_creator(name, base, kargs)\n\n"
  },
  {
    "path": "promoterz/representation/oldschool.py",
    "content": "#!/bin/python\nimport random\nimport json\nimport os\n\nfrom copy import deepcopy\n\nfrom .import Creator\nfrom deap import base\nfrom deap import tools\n\n\nfrom . .import parameterOperations\n\n\ndef constructPhenotype(stratSettings, individue):\n    # THIS FUNCTION IS UGLYLY WRITTEN; USE WITH CAUTION;\n    # (still works :})\n    Strategy = individue.Strategy\n    R = lambda V, lim: ((lim[1] - lim[0]) / 100) * V + lim[0]\n    AttributeNames = sorted(list(stratSettings.keys()))\n    Phenotype = {}\n    for K in range(len(AttributeNames)):\n        Value = R(individue[K], stratSettings[AttributeNames[K]])\n        Phenotype[AttributeNames[K]] = Value\n    Phenotype = parameterOperations.expandNestedParameters(Phenotype)\n    return Phenotype\n\n\ndef createRandomVarList(IndSize):\n    VAR_LIST = [random.randrange(0, 100) for x in range(IndSize)]\n    return VAR_LIST\n\n\ndef initInd(Criterion, Attributes):\n    w = Criterion()\n    IndSize = len(list(Attributes.keys()))\n    w[:] = createRandomVarList(IndSize)\n    return w\n\n\ndef getToolbox(Strategy, genconf, Attributes):\n    toolbox = base.Toolbox()\n    creator = Creator.init(base.Fitness, {'Strategy': Strategy})\n    toolbox.register(\"newind\", initInd, creator.Individual, Attributes)\n    toolbox.register(\"population\", tools.initRepeat, list, toolbox.newind)\n    toolbox.register(\"mate\", tools.cxTwoPoint)\n    toolbox.register(\"mutate\", tools.mutUniformInt, low=10, up=10, indpb=0.2)\n    toolbox.register(\"constructPhenotype\", constructPhenotype, Attributes)\n    return toolbox\n"
  },
  {
    "path": "promoterz/sequence/__init__.py",
    "content": "#!/bin/python\n\nfrom .locale import standard_loop\nfrom .world import parallel_world\n"
  },
  {
    "path": "promoterz/sequence/locale/standard_loop.py",
    "content": "#!/bin/python\nfrom deap import tools\nfrom copy import deepcopy\nimport random\nfrom deap import algorithms\n\nfrom ... import statistics\nfrom ... import evolutionHooks\nfrom ... import validation\nfrom ... import supplement\n\n\ndef checkPopulation(population, message):\n    if not (len(population)):\n        print(message)\n\n\ndef execute(World, locale):\n\n    # --populate if we don't have population (migrations might do it);\n    if not (locale.population):\n        locale.population = locale.extratools.ImmigrateRandom(\n            (5, 10),\n            locale.population\n        )\n\n    locale.extraStats = {}\n\n    # --validate individuals;\n    locale.population = validation.validatePopulation(\n        World.tools.constructPhenotype,\n        World.TargetParameters,\n        locale.population\n    )\n\n    # --remove equal citizens before evaluation for efficency\n    nonevaluated = [ind for ind in locale.population if not ind.fitness.valid]\n    Lu = len(nonevaluated)\n    print(\"first unevaluated: %i\" % len(nonevaluated))\n    remains = locale.extratools.populationPD(nonevaluated, 1.0)\n    Lr = len(remains)\n    print(\"%i individues removed due to equality\" % (Lu - Lr))\n    locale.population = [\n        ind for ind in locale.population if ind.fitness.valid\n    ] + remains\n\n    # --load current dataset for locale;\n    locale.Dataset = World.loadDatasetForLocalePosition(locale.position)\n\n    # --evaluate individuals;\n    locale.extraStats['nb_evaluated'], locale.extraStats[\n        'avgTrades'\n    ] = World.parallel.evaluatePopulation(\n        locale\n    )\n\n    locale.extraStats['avgExposure'] = sum(\n        [I.averageExposure\n         for I in locale.population])/len(locale.population)\n\n    # --send best individue to HallOfFame;\n    if not locale.EPOCH % 15:\n        BestSetting = tools.selBest(locale.population, 1)[0]\n        locale.HallOfFame.insert(BestSetting)\n    assert (sum([x.fitness.valid for x in 
locale.population]) == len(locale.population))\n\n    # --compile stats;\n    World.EvaluationModule.compileStats(locale)\n\n    # --population ages\n    qpop = len(locale.population)\n    locale.population = locale.extratools.populationAges(\n        locale.population, locale.EvolutionStatistics[locale.EPOCH]\n    )\n    wpop = len(locale.population)\n    locale.extraStats['nbElderDies'] = qpop - wpop\n\n    # INDIVIDUE FITNESS ATTRIBUTES FILTERS;\n    # --remove very inapt citizens\n    if World.conf.generation.minimumProfitFilter is not None:\n        locale.extratools.filterThreshold(World.conf.generation.minimumProfitFilter,\n                                          World.conf.generation._lambda)\n        checkPopulation(locale.population,\n                        \"Population dead after profit filter.\")\n\n    # --remove individuals below tradecount\n    if World.conf.generation.TradeNumberFilterRange is not None:\n        locale.extratools.filterTrades(World.conf.generation.TradeNumberFilterRange,\n                                       World.conf.generation._lambda)\n        checkPopulation(locale.population,\n                        \"Population dead after trading number filter.\")\n\n    # --remove individues based on average roundtripe exposure time;\n    if World.conf.generation.averageExposureLengthFilterRange is not None:\n        locale.extratools.filterExposure(\n            World.conf.generation.averageExposureLengthFilterRange,\n            World.conf.generation._lambda\n        )\n        checkPopulation(locale.population,\n                        \"Population dead after roundtrip exposure filter.\")\n\n    if not locale.population:\n        locale.population = World.tools.population(World.conf.generation.POP_SIZE)\n        print(\"Repopulating... 
Aborting epoch.\")\n\n    # --show stats;\n    World.EvaluationModule.showStatistics(locale)\n\n    # --calculate new population size;\n    if locale.EPOCH:\n        PRoFIGA = supplement.PRoFIGA.calculatePRoFIGA(\n            World.conf.generation.PRoFIGA_beta,\n            locale.EPOCH,\n            World.conf.generation.NBEPOCH,\n            locale.EvolutionStatistics[locale.EPOCH - 1],\n            locale.EvolutionStatistics[locale.EPOCH],\n        )\n        locale.POP_SIZE += locale.POP_SIZE * PRoFIGA\n\n        # put population size inside thresholds;\n        minps = World.conf.generation.POP_SIZE // 2\n        maxps = World.conf.generation.POP_SIZE * 3\n        try:\n            _POP_SIZE = max(min(locale.POP_SIZE, maxps), minps)\n            locale.POP_SIZE = int(round(_POP_SIZE))\n        except Exception as e:\n            locale.POP_SIZE = 30\n            M = \"POP_SIZE PROFIGA ERROR;\"\n            print(M)\n\n    # --filter best inds;\n    locale.population[:] = evolutionHooks.selBest(locale.population,\n                                                  locale.POP_SIZE)\n    checkPopulation(locale.population,\n                    \"Population dead after selection of score filter.\")\n    assert (None not in locale.population)\n\n    # --select best individues to procreate\n    LAMBDA = max(World.conf.generation._lambda,\n                 locale.POP_SIZE - len(locale.population))\n\n    TournamentSize = max(2 * LAMBDA,\n                         len(locale.population))\n\n    offspring = evolutionHooks.Tournament(locale.population,\n                                          LAMBDA,\n                                          TournamentSize)\n\n    offspring = [deepcopy(x) for x in offspring]  # is deepcopy necessary?\n\n    # --modify and integrate offspring;\n    offspring = algorithms.varAnd(\n        offspring, World.tools, World.conf.generation.cxpb, World.conf.generation.mutpb\n    )\n\n    locale.extratools.ageZero(offspring)\n    locale.population 
+= offspring\n\n    # --NOW DOESN'T MATTER IF SOME INDIVIDUE LACKS FITNESS VALUES;\n    assert (None not in locale.population)\n\n    # --immigrate individual from HallOfFame;\n    if random.random() < 0.2:\n        locale.population = locale.extratools.ImmigrateHoF(locale.population)\n\n    # --immigrate random number of random individues;\n    if random.random() < 0.5 or not locale.population:\n        locale.population = locale.extratools.ImmigrateRandom(\n            (2, 7),\n            locale.population\n        )\n\n    assert (None not in locale.population)\n"
  },
  {
    "path": "promoterz/sequence/world/parallel_world.py",
    "content": "#!/bin/python\nimport random\nimport itertools\nimport math\nimport time\n\n\ndef execute(World):\n\n    # --APPLY MIGRATION BETWEEN LOCALES;\n    if len(World.locales):\n        S, D = False, False\n        LocalePairs = itertools.combinations(World.locales, 2)\n        for L in LocalePairs:\n            distance = World.calculateDistance(L[0].position, L[1].position)\n            distance_weight = distance / World.maxdistance\n            if random.random() > distance_weight:\n                World.migration(L[0], L[1], (1, 7))\n                World.migration(L[1], L[0], (1, 7))\n\n    # --APPLY LOCALE CREATION;\n    if random.random() < World.conf.generation.localeCreationChance / 100:\n        World.generateLocale()\n\n    # --APPLY RANDOMIC LOCALE DESTRUCTION;\n    if random.random() < World.conf.generation.localeExplodeChance / 100:\n        chosenLocale = random.choice(World.locales)\n        World.explodeLocale(chosenLocale)\n\n    # --APPLY EXPECTED LOCALE DESTRUCTION;\n    for L in range(len(World.locales)):\n        if World.locales[L].EPOCH > World.conf.generation.localeExpirationAge:\n            if len(World.locales) > 2:\n                World.explodeLocale(World.locales[L])\n                #  if two locales are destroyed @ same time, post-locale migrations\n                #  will be a mess\n                break\n\n    # --APPLY LOCALE WALKS;\n    for L in range(len(World.locales)):\n        if random.random() < World.conf.generation.localeWalkChance / 100:\n            World.localeWalk(World.locales[L])\n"
  },
  {
    "path": "promoterz/statistics.py",
    "content": "#!/bin/python\n\nimport numpy as np\nfrom deap import tools\n\n\ndef getStatisticsMeter():\n    stats = tools.Statistics(lambda ind: ind.fitness.values[0])\n    stats.register(\"avg\", np.mean)\n    stats.register(\"std\", np.std)\n    stats.register(\"min\", np.min)\n    stats.register(\"max\", np.max)\n\n    return stats\n"
  },
  {
    "path": "promoterz/supplement/PRoFIGA.py",
    "content": "#!/bin/python\n\ndef calculatePRoFIGA(beta, EPOCH, NBEPOCH, oldstats, Stats):\n    remainingEPOCH_NB = NBEPOCH - EPOCH\n\n    X = beta * remainingEPOCH_NB * (Stats['max'] - oldstats['max']) / oldstats['max']\n    return X\n"
  },
  {
    "path": "promoterz/supplement/age.py",
    "content": "# 1/bin/python\nfrom deap import base\n\n\ndef _maturePopulation(population):\n    for W in range(len(population)):\n        try:\n            assert (population[W].Age)\n        except:\n            population[W].Age = 0\n        population[W].Age += 1\n\n\ndef _checkRetirement(individue, statistics, ageBoundary):\n    # (Minetti, 2005)\n    indscore = individue.fitness.values[0]\n    N = (ageBoundary[1] - ageBoundary[0]) / 2\n    aptitude = indscore - statistics['avg']\n    if aptitude > 0:\n        ABC = sum(ageBoundary) / 2\n        RSB = statistics['max'] - statistics['avg']\n    else:\n        ABC = ageBoundary[0]\n        RSB = statistics['avg'] - statistics['min']\n    RSB = max(1, RSB)\n    survival = ABC + (N * aptitude / RSB)\n    # oldenough = individue.Age > ageBoundary[0]\n    #relativeAge = (individue.Age-ageBoundary[0]) / (ageBoundary[1]-ageBoundary[0])\n    retires = individue.Age - survival > ageBoundary[1]\n    # print(survival)\n    return retires\n\n\ndef _killElders(population, statistics, ageBoundary):\n    for I in range(len(population)):\n        if _checkRetirement(population[I], statistics, ageBoundary):\n            population[I] = None\n    population = [x for x in population if x]\n    return population\n\n\ndef ageZero(population):\n    for q in range(len(population)):\n        population[q].Age = 0\n\n\ndef populationAges(ageBoundary, population, averageScore):\n    _maturePopulation(population)\n    population = _killElders(population, averageScore, ageBoundary)\n    return population\n"
  },
  {
    "path": "promoterz/supplement/phenotypicDivergence.py",
    "content": "#!/bin/python\nfrom deap import tools\nfrom . .import parameterOperations\nimport random\n\n\ndef checkPhenotypicDivergence(constructPhenotype, indA, indB):\n    cmp = [indA, indB]\n    cmp = [constructPhenotype(x) for x in cmp]\n    cmp = [parameterOperations.flattenParameters(x) for x in cmp]\n    score = 0\n    for w in cmp[0].keys():\n        if cmp[0][w] != cmp[1][w]:\n            score += 1\n    return score\n\n\ndef populationPhenotypicDivergence(constructPhenotype, population, delpercent):\n    if len(population) > 1:\n        for I in range(len(population) - 1):\n            for J in range(I + 1, len(population)):\n                if population[I]:\n                    score = checkPhenotypicDivergence(\n                        constructPhenotype, population[I], population[J]\n                    )\n                    if not score and random.random() < delpercent:\n                        population[I] = None\n    population = [x for x in population if x]\n    return population\n"
  },
  {
    "path": "promoterz/validation.py",
    "content": "#!/bin/python\nfrom .parameterOperations import flattenParameters\n\n\ndef checkPhenotypeParameterIntegrity(TargetParameters, phenotype):\n    cmp = [TargetParameters, phenotype]\n    cmp = [flattenParameters(x) for x in cmp]\n    # print(cmp)\n    cmp = [list(x.keys()) for x in cmp]\n    # print(\"%i ---- %i\" % (len(cmp[0]), len(cmp[1])))\n    for w in cmp[0]:\n        if not w in cmp[1]:\n            return w\n\n    return None\n\n\ndef checkPhenotypeAttributeRanges(TargetParameters, phenotype, tolerance=0.3):\n    cmp = [TargetParameters, phenotype]\n    cmp = [flattenParameters(x) for x in cmp]\n    for K in cmp[0].keys():\n        high_bound = cmp[0][K][1] + (tolerance * abs(cmp[0][K][1]))\n        low_bound = cmp[0][K][0] - (tolerance * abs(cmp[0][K][0]))\n        higher = cmp[1][K] > high_bound\n        lower = cmp[1][K] < low_bound\n        if higher or lower:\n            return \"%f %s %f\" % (low_bound, K, high_bound)\n\n    return None\n\n\ndef validatePopulation(IndividualToSettings, TargetParameters, population):\n    ErrMsg = \"--destroying invalid citizen--\\n\\t({ErrType} {ErrParameter})\\n\"\n    for p in range(len(population)):\n        phenotype = IndividualToSettings(population[p])\n        Err = checkPhenotypeParameterIntegrity(TargetParameters, phenotype)\n        if Err:\n            print(ErrMsg.format(ErrType='missing parameter', ErrParameter=Err))\n            population[p] = None\n            continue\n\n        Err = checkPhenotypeAttributeRanges(TargetParameters, phenotype)\n        if Err:\n            print(ErrMsg.format(ErrType=' invalid values on', ErrParameter=Err))\n            population[p] = None\n        if not population[p]:\n            print(phenotype)\n            pass\n    population = [x for x in population if x]\n    return population\n"
  },
  {
    "path": "promoterz/webServer/__init__.py",
    "content": "#!/bin/python\n\nfrom .import core\n"
  },
  {
    "path": "promoterz/webServer/core.py",
    "content": "#!/bin/python\nimport os\nimport re\nimport datetime\n\nimport flask\nimport dash\n\nfrom dash.dependencies import Input, Output\n\nimport dash_core_components as dcc\nimport dash_html_components as html\n\nfrom flask_caching import Cache\nfrom evaluation.gekko.statistics import epochStatisticsNames, periodicStatisticsNames\n\nfrom . import graphs\nfrom . import layout\n\nimport functools\nimport logging\nlog = logging.getLogger('werkzeug')\nlog.setLevel(logging.ERROR)\n\n\ndef build_server(webpageTitle):\n    # Setup the app\n    server = flask.Flask(__name__)\n    app = dash.Dash(__name__, server=server, csrf_protect=False)\n\n    app.scripts.config.serve_locally = False\n    app.css.config.serve_locally = False\n\n    app.webpageTitle = webpageTitle\n\n    timeout = 60 * 60  # 1 hour\n\n    app.startTime = datetime.datetime.now()\n\n    # Graph Update function bindings;\n    app.updateLocaleGraph = graphs.updateLocaleGraph\n    app.updateWorldGraph = graphs.updateWorldGraph\n    app.updateEvalBreakGraph = graphs.updateEvalbreakGraph\n\n    # Graphics initialization and input points against World;\n    # why is this placeholder required? 
;(\n    app.WorldGraph = dcc.Graph(id='WorldGraph', figure={})\n    app.LocaleGraphs = []\n    app.EvalBreakGraph = []\n\n    app.resultParameters = []\n    app.epochInfo = \"\"\n    app.layout = functools.partial(layout.getLayout, app)\n\n    app.config['suppress_callback_exceptions'] = False\n\n    # event triggers\n    onRefreshClick = Input('refresh-button', 'n_clicks')\n\n    \"\"\"\n    # update graph methods\n    @app.callback(Output('last-refresh', 'children'),\n                  [Input('refresh-button', 'n_clicks')])\n    def display_time(w):\n        print(\"Refreshing graphical interface graphics.\")\n        return str(datetime.datetime.now())\n\n    @app.callback(Output('WorldGraph', 'children'),\n                  [Input('refresh-button', 'n_clicks')])\n    def updateGGraphs(w):\n        return [app.WorldGraph]\n\n    @app.callback(Output('LocaleGraphs', 'children'),\n                  [Input('refresh-button', 'n_clicks')])\n    def updateLGraphs(w):\n        return [app.GraphicList]\n    \"\"\"\n\n    # SELECT PAGE;\n    @app.callback(dash.dependencies.Output('page-content', 'children'),\n                  [dash.dependencies.Input('url', 'pathname')])\n    def display_page(pathname):\n        if re.findall(\"evalbreak\", str(pathname)):\n            return layout.getEvalbreak(app)\n        if re.findall(\"results\", str(pathname)):\n            return layout.getResults(app)\n        else:\n            return layout.getCommon(app)\n\n    @server.route('/static/<path:path>')\n    def send_css(path):\n        return flask.send_from_directory(os.path.dirname(__file__), path)\n\n    # load external css\n    currentDirectory = os.path.dirname(os.path.abspath(__file__))\n    externalCssListPath = os.path.join(currentDirectory,\n                                       \"external_css_list.txt\")\n\n    with open(externalCssListPath) as cssListFile:\n        external_css = cssListFile.read().split(\"\\n\")\n        external_css = list(filter(None, 
external_css))\n\n    for css in external_css:\n        app.css.append_css({\"external_url\": css})\n\n    # launch DASH APP\n    return app, server\n"
  },
  {
    "path": "promoterz/webServer/external_css_list.txt",
    "content": "https://fonts.googleapis.com/css?family=Overpass:400,400i,700,700i\nhttps://cdn.jsdelivr.net/gh/plotly/dash-app-stylesheets@c6a126a684eaaa94a708d41d6ceb32b28ac78583/dash-technical-charting.css\n"
  },
  {
    "path": "promoterz/webServer/graphs.py",
    "content": "#!/bin/python\n\nimport dash_core_components as dcc\n\nfrom evaluation.gekko.statistics import epochStatisticsNames, periodicStatisticsNames\n\n\ndef updateWorldGraph(app, WORLD):\n    environmentData = [\n        {\n        }\n    ]\n    populationGroupData = [\n        {\n            'x': [locale.position[0]],\n            'y': [locale.position[1]],\n            'type': 'scatter',\n            'name': locale.name,\n            'showscale': False,\n            'mode': 'markers',\n            'marker': {\n                'symbol': 'square'\n            }\n\n        } for locale in WORLD.locales\n    ]\n\n    fig = {\n        'data': populationGroupData,\n        'layout': {\n            'title': \"World Topology: 2D MAP\"\n        }\n    }\n\n\n    G = dcc.Graph(id=\"WorldGraph\", figure=fig)\n    #app.layout.get(\"WorldGraphContainer\").children = [G]\n    app.WorldGraph = G\n    return G\n\n\ndef updateLocaleGraph(app, LOCALE):\n\n    GraphName = LOCALE.name\n    print('Loading %s' % GraphName)\n    Statistics = LOCALE.EvolutionStatistics\n    ID = [s for s in GraphName if s.isdigit()]\n    annotations = []\n\n    oldLocaleGraph = None\n    for lidx, localeGraph in enumerate(app.LocaleGraphs):\n        if localeGraph.id == LOCALE.name:\n            oldLocaleGraph = lidx\n            break\n\n    statisticsNames = {}\n    statisticsNames.update(epochStatisticsNames)\n    # statisticsNames.update(periodicStatisticsNames)\n\n    annotationFontDescription = {\n        'family': 'Arial',\n        'size': 12,\n        'color': 'rgb(37,37,37)'\n    }\n\n    \"\"\"\n    for Statistic in Statistics:\n        if 'dateRange' in Statistic.keys():\n            if Statistic['dateRange']:\n                for R, dateRange in enumerate(Statistic['dateRange']):\n                    if dateRange is not None:\n                        annotations.append(\n                            {\n                                'xref': 'axis',\n                                
'yref': 'paper',\n                                'xanchor': 'left',\n                                'yanchor': 'bottom',\n                                'font': annotationFontDescription,\n                                'x': R,\n                                'y': 1 if not len(annotations) %\n                                2 else 0.93,  # avoid label overlap;\n                                'text': dateRange,\n                            }\n                        )\n    \"\"\"\n\n    colorSequence = [\n        (188, 189, 34),\n        (100, 11, 182),\n        (186, 3, 34),\n        (45, 111, 45),\n        (66, 128, 66),\n        (128, 66, 66),\n    ]\n    statNames = [\n        'avg', 'std', 'min',\n        'max',\n        #'evaluationScore',\n        #'evaluationScoreOnSecondary'\n    ]\n\n    DATA = [\n            {\n                'x': [Statistic['id'] for Statistic in Statistics],\n                'y': [Statistic[statNames[S]] for Statistic in Statistics],\n                'type': 'line',\n                'name': statisticsNames[statNames[S]],\n                'line': {'color': 'rgb%s' % str(colorSequence[S])},\n            }\n            for S in range(len(statNames))\n        ]\n\n    fig = {\n        'data': DATA,\n        'layout': {\n            'title': 'Evolution at %s' % GraphName,\n            'annotations': annotations\n        },\n    }\n\n    G = dcc.Graph(figure=fig, id=LOCALE.name)\n    if oldLocaleGraph is not None:\n        app.LocaleGraphs[oldLocaleGraph] = G\n    else:\n        app.LocaleGraphs.append(G)\n\n    return G\n\n\ndef updateEvalbreakGraph(app, EvaluationSummary):\n\n    K = [\"evaluation\", \"secondary\"]\n    GES = dict([(k, []) for k in K])\n    for E in EvaluationSummary:\n        for k in K:\n            if k in E.keys():\n                GES[k].append(E[k])\n            else:\n                GES[k].append(None)\n\n    DATA = [\n        {\n            'x': list(range(len(GES[KEY]))),\n            'y': GES[KEY],\n       
     'type': 'line',\n            'name': KEY.upper()\n        } for KEY in GES.keys()\n    ]\n\n    figure = {\n        'data': DATA,\n        'layout': {\n            'title': \"Evaluation Breaks\"\n        }\n    }\n\n    G = dcc.Graph(figure=figure, id=\"EvaluationBreaksGraph\")\n    app.EvalBreakGraph = G\n    return G\n"
  },
  {
    "path": "promoterz/webServer/layout.py",
    "content": "#!/bin/python\n\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport datetime\n\n\nallStyle = {\n            'width': '1100',\n            'margin-left': 'auto',\n            'margin-right': 'auto',\n            'font-family': 'overpass',\n            'background-color': '#F3F3F3'\n        }\n\n\ndef getLayout(app):\n    layout = html.Div([\n        dcc.Location(id='url', refresh=False),\n        getHeader(app),\n        html.Div(id='page-content')\n    ])\n    return layout\n\n\ndef getHeader(app):\n    # this is a mess;\n    inlineBlock = {\"display\": \"inline-block\"}\n    headerWidgets = [\n        html.Button(\"Refresh\", id='refresh-button'),\n        html.Div(\n            [\n                html.Div(\"Last refresh @ \", style=inlineBlock.update({\"float\": \"left\"})),\n                html.Div(datetime.datetime.now(),\n                         id='last-refresh', className=\"showTime\",\n                         style=inlineBlock.update({\"float\": \"left\"})),\n\n                html.Div(\"%s Start time\" % app.startTime,\n                         id='start-time', className=\"showTime\",\n                         style=inlineBlock.update({\"float\": \"right\"})),\n                html.Br(),\n                html.Center([\n                    html.Div(app.epochInfo, id=\"current-epoch\")\n                    ])\n            ], className=\"showTime\")\n    ]\n\n    pageMenu = [\n        html.A(html.Button(\"Evolution Statistics\"), href=\"/\"),\n        html.A(html.Button(\"Evaluation Breaks\"), href=\"/evalbreak\"),\n        html.A(html.Button(\"View Results\"), href=\"/results\")\n        # html.Button(\"View Settings\", className=\"unimplemented\"),\n        # html.Button(\"Inspect Population\", className=\"unimplemented\")\n    ]\n\n\n\n    # html.Link(rel='stylesheet', href='/static/promoterz_style.css'),\n    header = html.Div(\n        [\n            html.H2(\n                app.webpageTitle,\n             
   style={'padding-top': '20', 'text-align': 'center'},\n            ),\n            html.Div(headerWidgets),\n            html.Div(pageMenu),\n        ],\n        style=allStyle)\n\n    return header\n\n\ndef getCommon(app):\n    return html.Div([\n        html.Div(children=app.WorldGraph, id='WorldGraphContainer'),\n        html.Div(children=app.LocaleGraphs, id='LocaleGraphsContainer')\n    ], style=allStyle)\n\n\ndef getEvalbreak(app):\n    return html.Div([\n        html.Div(children=app.EvalBreakGraph, id='EvalBreakContainer')\n    ], style=allStyle)\n\n\ndef getResults(app):\n    return [html.Textarea(str(r[0]) + '\\n' + str(r[1]), style={'width': '525', 'height': '550'}) for r in app.resultParameters]\n"
  },
  {
    "path": "promoterz/webServer/promoterz_style.css",
    "content": ".unimplemented {\n    background-color: #666;\n} \n\n.showTime {\n    display: inline-block;\n}\n"
  },
  {
    "path": "promoterz/world.py",
    "content": "#!/bin/python\nimport random\n\nimport time\nimport math\n\nfrom . import locale\n\n\nclass World():\n    def __init__(\n            self,\n            GlobalTools=None,\n            populationLoops=None,\n            worldLoops=None,\n            conf=None,\n            TargetParameters=None,\n            EnvironmentParameters=None,\n            onInitLocale=None,\n            web=None,\n    ):\n        self.tools = GlobalTools\n\n        # main components\n        self.populationLoops = populationLoops\n        self.worldLoops = worldLoops\n\n        # genetic algorithm status\n        self.EPOCH = 0\n        self.locales = []\n        self.totalEvaluations = 0\n\n        # genetic algorithm attributes\n        self.size = [500, 500]\n        self.maxdistance = self.calculateDistance([0, 0], self.size)\n        self.TargetParameters = TargetParameters\n\n        self.conf = conf\n        # Temporary assignment of configs\n\n        self.localeID = 1\n        self.EnvironmentParameters = EnvironmentParameters\n        self.onInitLocale = onInitLocale\n        self.web = web\n\n    def generateLocale(self):\n        name = 'Locale%i' % (self.localeID)\n        self.localeID += 1\n        position = [random.randrange(0, self.size[x]) for x in range(2)]\n\n        L = locale.Locale(self,\n                          name,\n                          position,\n                          random.choice(self.populationLoops)\n        )\n\n        self.locales.append(L)\n\n    def migration(self, source, target, number_range):\n        number = random.randrange(*number_range)\n        for W in range(number):\n            if len(source.population):\n                index = random.randrange(0, len(source.population))\n                individual = source.population.pop(index)\n                del individual.fitness.values\n                target.population.append(individual)\n\n    def explodeLocale(self, explLocale):\n        if len(self.locales) < 2:\n         
   return\n\n        totaldistance = 0\n        for T in self.locales:\n            if explLocale == T:\n                T.tempdist = 0\n                continue\n\n            distance = self.calculateDistance(\n                explLocale.position, T.position)\n            T.tempdist = distance\n            totaldistance += distance\n        for T in self.locales:\n            fugitiveNumber = T.tempdist / totaldistance *\\\n                len(explLocale.population)\n\n            T.fugitivenumber = int(round(fugitiveNumber))\n\n        for T in self.locales:\n            self.migration(explLocale, T,\n                           (T.fugitivenumber, T.fugitivenumber + 1))\n            del T.tempdist\n            del T.fugitivenumber\n\n        self.locales = [x for x in self.locales if x != locale]\n\n    def runEpoch(self):\n        epochHeader = \"EPOCH %i/%i\" % (\n            self.EPOCH,\n            self.conf.generation.NBEPOCH\n        )\n\n        print(\"\\t====== %s ======\" % epochHeader)\n        epochStartTime = time.time()\n\n        if self.web:\n            self.epochInfo = epochHeader\n            self.web.updateWorldGraph(app=self.web, WORLD=self)\n\n        for LOCALE in self.locales:\n            LOCALE.run()\n            if self.web:\n                self.web.updateLocaleGraph(app=self.web, LOCALE=LOCALE)\n\n        self.worldLoops[0](self)\n\n        self.EPOCH += 1\n        epochRunTime = time.time() - epochStartTime\n        print(\"Epoch runs in %.2f seconds;\" % epochRunTime)\n        if not self.EPOCH % 10:\n            print(\"Backend power %s\" % self.parallel.lasttimesperind)\n        print(\"\")\n\n    @staticmethod\n    def calculateDistance(point1, point2):\n        x = abs(point1[0] - point2[0])\n        y = abs(point1[1] - point2[1])\n        D = math.sqrt(x ** 2 + y ** 2)\n        return D\n\n    def seedEnvironment(self):\n        # round to nearest square number\n        self.sectorSeedRoot = 
round(math.sqrt(self.conf.generation.worldSeedSize))\n\n        self.environmentSectors = []\n        for i in range(self.sectorSeedRoot):\n            row = []\n            for j in range(self.sectorSeedRoot):\n                ENV = self.onInitLocale(self)\n                row.append(ENV)\n            self.environmentSectors.append(row)\n\n    def loadDatasetForLocalePosition(self, position):\n        pos = [math.floor(P / self.size[p] * self.sectorSeedRoot)\n               for p, P in enumerate(position)]\n\n        return self.environmentSectors[pos[0]][pos[1]]\n\n    def localeWalk(self, locale):\n        ammount = self.conf.generation.localeWalkDistance\n\n        variation = [random.randrange(-ammount, ammount)\n                     for i in range(2)]\n\n        for i in range(2):\n            locale.position[i] += variation[i]\n\n            # put it inside boundaries,\n            # make world appear rounded like our planet :3;\n\n            # fix too low values\n            while locale.position[i] < 0:\n                locale.position[i] += self.size[i]\n\n            # fix too high values\n            locale.position[i] = locale.position[i] % self.size[i]\n"
  },
  {
    "path": "requirements.txt",
    "content": "ccxt==1.13.139\npandas_datareader==0.5.0\nnumpy==1.16.2\ntulipy==0.2\npandas==0.18.1\ndeap==1.2.2\nscipy==0.19.0\npytoml==0.1.16\nJs2Py==0.59\nFlask_Caching==1.4.0\nQuandl==3.4.0\ndash==0.39.0\ndash-daq==0.1.0\nFlask==1.0.2\nrequests>=2.20.0\nbayesian_optimization==0.6.0\nzipline==1.2.0\narch==4.3.1\nnames==0.3.0\nmatplotlib==2.2.2\npython_dateutil==2.7.3\npytz==2018.5\nscikit_learn==0.19.2\nwaitress\n"
  },
  {
    "path": "settings/_Global.toml",
    "content": "gekkoPath = '$HOME/gekko'\nconfigFilename = 'example-config.js'\nlog_name = 'evolution_gen.csv'\n\n# Hosts list of remote machines running gekko, to distribute evaluation load;\n# option values: path to HOSTS file list OR False;\nRemoteAWS = '../AmazonSetup/hosts'\n\n# Your gekko local URL - CHECK THIS!\n# gekko:3000 stands for the default url at docker-compose scheme of things.\nGekkoURLs = ['http://localhost:3000', 'http://gekko:3000']\nshowFailedStrategies = true\n\n"
  },
  {
    "path": "settings/_backtest.toml",
    "content": "# show gekko verbose (strat info) - gekko must start with -d flag;\ngekkoDebug = 0\n# time window size on days of candlesticks for each evaluation\ndeltaDays = 90\n# candle size for gekko backtest in minutes\ncandleSize = 10\n# mode of profit interpretation = v1, v2 or v3.\n# please check the first functions at evaluation.gekko.backtest\n# to understand what this is. Has a big impact on the evolutionary agenda.\ninterpreteBacktestProfit = 'v3'\n# Number of candlestick data loaded simultaneously in each locale;\n# slower EPOCHS, theoretically better evolution;\n# seems broken. values other than 1 make evolution worse.\nParallelCandlestickDataset = 1\n# number of parallel backtests running on gekko;\nParallelBacktests = 6\n\n\n"
  },
  {
    "path": "settings/_bayesian.toml",
    "content": ""
  },
  {
    "path": "settings/_binance.toml",
    "content": "credentialsFilePath = \"\"\nstrategyRunTimePeriodHours = 12\nstrategySelectorSigma = 10\n# following option points to binance asset/currency .json file located at\n# 'exchanges' folder of gekko. selecting it up is optional.\nbinanceAssetCurrencyTargetFilePath = \"\"\n"
  },
  {
    "path": "settings/_dataset.toml",
    "content": "# span in days from the end of dataset to the beginning. Or zero.\n# (to restrain length);\ndataset_span = 0\n# span for evaluation dataset. same scheme.\neval_dataset_span = 0\n\n\n# -- Gekko Dataset Settings\n# in order to enable dataset selection, turn off autoselect entry;\n[dataset_source]\nautoselect = true\nexchange = 'kraken'\ncurrency = 'USD'\nasset = 'LTC'\n\n[dataset_source2]\nautoselect = true\nexchange = 'kraken'\ncurrency = 'USD'\nasset = 'LTC'\n\n[eval_dataset_source]\nautoselect = true\nexchange = 'kraken'\ncurrency = 'USD'\nasset = 'LTC'\n\n\n\n"
  },
  {
    "path": "settings/_evalbreak.toml",
    "content": "# number of individuals selected by score on each evaluation break for each locale;\nNBBESTINDS = 1\n# number of individuals randomly selected on each evaluation break for each locale;\nNBADDITIONALINDS = 4\n\n# show current best settings on every X epochs. (or False)\nevaluateSettingsPeriodically = 50\n\n# number of evaluations on evaluation break. for each selected individual on locales;\nproofSize = 12\n\n\n"
  },
  {
    "path": "settings/_generation.toml",
    "content": "\n# Verbose single evaluation results;\nshowIndividualEvaluationInfo = false\n\n# if parameter is set to value rather than tuple limits at settings make the value\n# a tuple based on chosen spread value (percents); value = 10 --spread=50-->  value = (515)\nparameter_spread  = 60\n\n# Initial population size per locale\nPOP_SIZE = 50\n\n# number of epochs to run\nNBEPOCH = 3000\n\n# number of locales on parallel GA;\nNBLOCALE = 3\n\n# -- Genetic Algorithm Parameters\ncxpb = 0.8 # Probabilty of crossover \nmutpb = 0.2# Probability of mutation;\n_lambda = 14# size of offspring generated per epoch;\n\n# weight of PRoFIGA calculations on variability of population size\nPRoFIGA_beta = 0.005\nageBoundaries = [9, 19] # minimum age to die age when everyone dies (on EPOCHS)\n\n# after this age in epoches locale surely explodes i.e. ends.\nlocaleExpirationAge = 100\n\n# chance on each epoch of a locale to finish [in percentage];\nlocaleExplodeChance = 2\n\n# chance on each epoch of a locale creation [in percentage];\nlocaleCreationChance = 2\n\n# chance on each epoch of a locale to change position [in percentage];\nlocaleWalkChance = 40\n\n# max distance a locale can walk across world map on each epoch;\nlocaleWalkDistance = 5\n\n# number of different candle date ranges to seed world;\n# no area overlap, no empty spaces.\n# this rounds to the nearest square number.\nworldSeedSize = 40\n\n# filter individuals for minimum profit (or set to None)\nminimumProfitFilter = -15\n\n# filter individuals for minimum trade count; [has heavy impact] (or set to None)\nTradeNumberFilterRange = [6, 300]\n\n# filter individuals with roundtripe duration outside this range of values in hours (or set to None)\naverageExposureLengthFilterRange = [0, 300]\n\n\n# until another time range in dataset is selected;\n\n# chromosome settings are for -gc mode which uses another GA internal representation mode\n# for parameter values of each individue;\n# check 
promoterz/representation/chromosome.py to see how it works.\n# both parameters interact with crossover probability AKA cxpb;\n# practical effects on evolution are really uknown;\n\n[chromosome]\n# length of the representation for each parameter. largers sizes should mantain\n# evolutionary dynamics for parameters with proportional larger ranges;\nGeneSize = 3\n# number of parameters represented by each cromosome;\n# this should mantain evolutionary dynamics for strategies with proportional larger\n# parameter counts;\nDensity = 2\n\n\n# weights to score each individual self explanatory;\n[weights]\nprofit = 1.0\nsharpe = 0.1\n\n\n\n\n\n"
  },
  {
    "path": "stratego/README.md",
    "content": "### Usage\n\nThis is a submodule of japonicus. This takes care of on-the-fly strategy creation and management.\nIt's at a very beta stage, like the rest of this GA implementation.\n\n### TODO:\n\nThe method is to just sum indicators, i.e. to buy, all indicators should be above threshold.\nThat can work, but more complex interactions between indicators should be implemented.\n\n### Disclaimer:\n\nstratego? japonicus? promoterz? from which depth of hell do those names come? hehehe\n"
  },
  {
    "path": "stratego/__init__.py",
    "content": "# 1/bin/python\nfrom .import gekko_strategy\n"
  },
  {
    "path": "stratego/gekko_strategy.py",
    "content": "#!/bin/python\nimport os\nimport random\nimport hashlib\nimport re\nfrom collections import OrderedDict\n\n# from . import Settings\nfrom .indicator_properties import *\n\n# gekkoStratFolder = Settings('').Global['gekkoDir']+'/strategies/'\nsimplifyIndicators = lambda name: \"var {I} = this.indicators.{i};\".format(\n    i=name.lower(), I=name.upper()\n)\naddIndicatorText = lambda name: \"this.addIndicator('{i}', '{I}', this.settings.{I}{A});\".format(\n    i=name.lower(), I=name.upper(), A=IndicatorProperties[name]['input']\n)\nonlyLetters = lambda message: re.sub(r\"[^A-Za-z]+\", '', message)\n\n\nclass StrategyFileManager():\n\n    def __init__(self, gekkoPath, indicatorSettings):\n        self.gekkoStratFolder = gekkoPath + '/strategies/japonicus/'\n        self.gekkoIndicatorFolder = gekkoPath + '/strategies/indicators/'\n        if not os.path.isdir(self.gekkoStratFolder):\n            os.mkdir(self.gekkoStratFolder)\n        AllowedIndicators = list(IndicatorProperties.keys())\n        AllowedIndicators = [\n            ind for ind in AllowedIndicators if indicatorSettings[ind]['active']\n        ]\n        baseContent = open('stratego/skeleton/ontrend.js').read()\n        self.baseMD5 = hashlib.md5(baseContent.encode('utf-8')).hexdigest()\n        self.sessionCreatedFiles = []\n        self.skeletonHeader = [\n            l for l in baseContent.split('\\n') if '//JAPONICUS' in l\n        ][\n            0\n        ]\n        self.skeletonHeader = self.interpreteSkeletonHeader(self.skeletonHeader)\n        for I in range(len(AllowedIndicators)):\n            if not os.path.isfile(\n                \"%s%s.js\" % (self.gekkoIndicatorFolder, AllowedIndicators[I])\n            ):\n                print(\"Indicator %s doesn't exist!\" % AllowedIndicators[I])\n                AllowedIndicators[I] = None\n        self.AllowedIndicators = [x for x in AllowedIndicators if x]\n        if not self.AllowedIndicators:\n            exit(\"No usable 
indicators detected.\")\n\n    def selectIndicator(self, chosenIndicators, phenotype, Type):\n        indicatorsOnPhenotype = [\n            ind for ind in phenotype.keys() if ind in IndicatorProperties.keys()\n        ]\n        allOfType = [\n            ind\n            for ind in indicatorsOnPhenotype\n            if IndicatorProperties[ind]['group'] == Type\n        ]\n        Indicators = sorted(\n            allOfType, key= lambda ind: phenotype[ind]['active'], reverse=True\n        )\n        chosenIndicatorNames = [\n            chosenIndicators[name] for name in chosenIndicators.keys()\n        ]\n        for Ind in Indicators:\n            if Ind not in chosenIndicatorNames:\n                return Ind\n\n        raise RuntimeError(\"not enough indicators for strategy %s;\" % Indicators)\n\n    def checkStrategy(self, phenotype):\n        AllIndicators = self.AllowedIndicators\n        Indicators = {}\n        for indicatorInternalName in self.skeletonHeader.keys():\n            selectedIndicatorType = self.skeletonHeader[indicatorInternalName]\n            selectedIndicator = self.selectIndicator(\n                Indicators, phenotype, selectedIndicatorType\n            )\n            Indicators.update({indicatorInternalName: selectedIndicator})\n\n        def sortIndicators(ind):\n            if ind in phenotype.keys():\n                return phenotype[ind]['active']\n\n            else:\n                return 0\n\n        FallbackIndicators = [x for x in AllIndicators if x in phenotype.keys()]\n        if not Indicators:\n            Indicators = sorted(FallbackIndicators, key=sortIndicators, reverse=True)[\n                0:2\n            ]\n        if not Indicators:\n            exit(\"NO INDICATORS\")\n        IndicatorNames = [Indicators[slot] for slot in Indicators.keys()]\n        StrategyFileName = 'j' + self.baseMD5[-4:] + ''.join(IndicatorNames)\n        stratpath = self.gekkoStratFolder + StrategyFileName + '.js'\n        if not 
os.path.isfile(stratpath):\n            print(self.sessionCreatedFiles)\n            self.createStrategyFile(Indicators, stratpath)\n        return 'japonicus/' + StrategyFileName\n\n    def interpreteSkeletonHeader(self, header):\n        Header = OrderedDict()\n        header = header.replace('//JAPONICUS:', '')\n        for segment in header.split(','):\n            if '|' in segment:\n                segment = segment.strip(' ').split('|')\n                print(segment)\n                Header[segment[0]] = onlyLetters(segment[1].lower())\n        return Header\n\n    def createStrategyFile(self, Indicators, stratpath):\n        BASE = open(\"stratego/skeleton/ontrend.js\").read()\n        for Indicator in Indicators.keys():\n            BASE = BASE.replace(Indicator, Indicators[Indicator])\n        FILE = open(stratpath, 'w')\n        FILE.write(BASE)\n        print(\"Creating strategy %s file.\" % stratpath)\n        self.sessionCreatedFiles.append(stratpath)\n        FILE.close()\n\n    def _createStrategyFile(self, Indicators, stratpath):\n        BASE = open(\"stratego/skeleton/dumbsum.js\").read()\n        InitIndicators = [addIndicatorText(ind) for ind in Indicators]\n        BASE = BASE.replace(\"//ADD_INDICATORS;\", ('\\n'.join(InitIndicators)))\n        SimplifyIndicators = [simplifyIndicators(ind) for ind in Indicators]\n        BASE = BASE.replace(\"//SIMPLIFY_INDICATORS;\", ('\\n'.join(SimplifyIndicators)))\n        BuyConditions = []\n        SellConditions = []\n        for ind in Indicators:\n            Bc = \"%s.%s %s\" % (\n                ind,\n                IndicatorProperties[ind]['attrname'],\n                IndicatorProperties[ind]['result'][0].format(i=ind),\n            )\n            Sc = \"%s.%s %s\" % (\n                ind,\n                IndicatorProperties[ind]['attrname'],\n                IndicatorProperties[ind]['result'][1].format(i=ind),\n            )\n            BuyConditions.append(Bc)\n            
SellConditions.append(Sc)\n        BASE = BASE.replace(\n            \"//BUYCONDITIONS;\", \"var BuyConditions = [%s];\" % ', '.join(BuyConditions)\n        )\n        BASE = BASE.replace(\n            \"//SELLCONDITIONS;\",\n            \"var SellConditions = [%s];\" % ', '.join(SellConditions),\n        )\n        FILE = open(stratpath, 'w')\n        FILE.write(BASE)\n        print(\"Creating strategy %s file.\" % stratpath)\n        self.sessionCreatedFiles.append(stratpath)\n        FILE.close()\n"
  },
  {
    "path": "stratego/indicator_properties.py",
    "content": "#!/bin/python\nstdResult = [\"> this.settings.{i}.thresholds.up\", \"< this.settings.{i}.thresholds.down\"]\nagainstPrice = [\"> price\", \"< price\"]\nReverse = lambda x: [x[1], x[0]]\nIndicatorProperties = {\n    \"ADX\": {\n        \"input\": '', \"attrname\": \"result\", \"result\": stdResult, \"group\": \"momentum\"\n    },\n    \"ATR\": {\n        \"input\": '', \"attrname\": \"result\", \"result\": stdResult, \"group\": \"volatility\"\n    },\n    \"PPO\": {\n        \"input\": '', \"attrname\": \"PPOhist\", \"result\": stdResult, \"group\": \"momentum\"\n    },\n    \"DEMA\": {\"attrname\": \"result\", \"result\": stdResult, \"input\": '', \"group\": \"trend\"},\n    \"RSI\": {\n        \"result\": Reverse(stdResult),\n        \"input\": '',\n        \"attrname\": \"result\",\n        \"group\": \"momentum\",\n    },\n    \"TSI\": {\n        \"input\": '', \"result\": stdResult, \"attrname\": \"result\", \"group\": \"momentum\"\n    },\n    \"LRC\": {\n        \"result\": againstPrice,\n        \"attrname\": \"result\",\n        \"input\": '.depth',\n        \"group\": \"trend\",\n    },\n    \"SMMA\": {\n        \"input\": '', \"attrname\": 'result', \"result\": stdResult, \"group\": \"overlap\"\n    },\n    \"CCI\": {\n        \"input\": '', \"result\": stdResult, \"attrname\": 'result', \"group\": \"momentum\"\n    },\n}\n"
  },
  {
    "path": "stratego/skeleton/dumbsum.js",
    "content": "// helpers\nvar _ = require('lodash');\nvar log = require('../../core/log.js');\n\n// let's create our own method\nvar method = {};\n\nmethod.init = function() {\n\n    this.age = 0;\n\n    this.currentTrend;\n    this.requiredHistory = 16;\n    this.persistence=0;\n    //ADD_INDICATORS;\n    this.addindicator('inda', '..INDA..', this.settings['..INDA..'])\n}\n\n// what happens on every new candle?\nmethod.update = function(candle) {\n\n}\n\n\nmethod.log = function() {\n\n}\n\n\n\nmethod.validation = function(ConditionList)\n    {\n       var validNB = ConditionList.filter(function(s) { return s; }).length;\n       return validNB/ ConditionList.length; \n    }\n\nmethod.checkPersistence = function(candidateAdvice)\n{\n    if (this.persistence >= this.settings.persistence)\n        this.advice(candidateAdvice);\n    else\n        this.advice();\n\n}\n\nmethod.check = function(candle) {\n\n    var price = candle.close;\n\n\n    //SIMPLIFY_INDICATORS;\n\n    //BUYCONDITIONS;\n    //SELLCONDITIONS;\n \n    this.age++;\n \n\n    if (this.validation(BuyConditions) > 0.6)\n    {\n\n        if(this.currentTrend !== 'up') {\n            this.currentTrend = 'up';\n            this.advice();\n            this.persistence=0;\n        } else{\n            this.persistence++;\n            this.checkPersistence('long');\n\n        }\n    }\n    else if (this.validation(SellConditions) > 0.6)\n    {\n\n\n        if (this.currentTrend !== 'down') {\n            this.currentTrend = 'down';\n            this.advice();\n            this.persistence=0;\n        } else{\n            this.persistence++;\n            this.checkPersistence('short');\n        }\n\n\n    } else {\n\n        this.advice();\n    }\n\n\n}\n\nmodule.exports = method;\n"
  },
  {
    "path": "stratego/skeleton/ontrend.js",
    "content": "/*\n  skeleton adapted from former strategy:\n\n\tRSI Bull and Bear + ADX modifier\n\t1. Use different RSI-strategies depending on a longer trend\n\t2. But modify this slighly if shorter BULL/BEAR is detected\n\t-\n\t12 feb 2017\n\t-\n\t(CC-BY-SA 4.0) Tommie Hansen\n\thttps://creativecommons.org/licenses/by-sa/4.0/\n*/\n\n// req's\nvar log = require ('../../core/log.js');\nvar config = require ('../../core/util.js').getConfig();\n\n// strategy\nvar strat = {\n\t\n\t/* INIT */\n\tinit: function()\n\t{\n\t\tthis.name = 'RSI Bull and Bear ADX';\n\t\tthis.requiredHistory = 10//config.tradingAdvisor.historySize;\n\t\tthis.resetTrend();\n\t\t\n\t\t// debug? set to flase to disable all logging/messages/stats (improves performance)\n\t\tthis.debug = true;\n\t\t\n\t\t// performance\n\t\t//config.backtest.batchSize = 1000; // increase performance\n\t\t//config.silent = true;\n\t\t//config.debug = false;\n\n      //JAPONICUS:BULLMOM|MOMENTUM,BEARMOM|MOMENTUM,SECMOM|MOMENTUM;\n\n\t\t// SMA\n\t\tthis.addIndicator('maSlow', 'SMA', this.settings.SMA_long );\n\t\tthis.addIndicator('maFast', 'SMA', this.settings.SMA_short );\n\t\t\n\t\t// RSI\n\t  this.addIndicator('BULL_momentum', 'BULLMOM', this.settings['BULLMOM'] );\n\t\tthis.addIndicator('BEAR_momentum', 'BEARMOM', this.settings['BEARMOM'] );\n\t\t\n\t\t// ADX\n\t\tthis.addIndicator('secondary_momentum', 'SECMOM', this.settings['SECMOM'] )\n\t\t\n\t\t\n\t\t// debug stuff\n\t\tthis.startTime = new Date();\n\t\t\n\t\t// add min/max if debug\n\t\tif( this.debug ){\n\t\t\tthis.stat = {\n\t\t\t\tadx: { min: 1000, max: 0 },\n\t\t\t\tbear: { min: 1000, max: 0 },\n\t\t\t\tbull: { min: 1000, max: 0 }\n\t\t\t};\n\t\t}\n\t\t\n\t}, // init()\n\t\n\t\n\t/* RESET TREND */\n\tresetTrend: function()\n\t{\n\t\tvar trend = {\n\t\t\tduration: 0,\n\t\t\tdirection: 'none',\n\t\t\tlongPos: false,\n\t\t};\n\t\n\t\tthis.trend = trend;\n\t},\n\t\n\t\n\n\t\n\t\n\t/* CHECK */\n\tcheck: function()\n\t{\n\t\t// get all indicators\n\t\tlet 
ind = this.indicators,\n\t\t\tmaSlow = ind.maSlow.result,\n\t\t\tmaFast = ind.maFast.result,\n\t\t\tsec = this.indicators.secondary_momentum.result;\n\t\t\n\t\t\n\t\t\t\n\t\t// BEAR TREND\n\t\tif( maFast < maSlow )\n\t\t{\n\t\t\tvar momentum = ind.BEAR_momentum.result;\n\t\t\tlet momentum_hi = this.settings['BEARMOM'].thresholds.up,\n\t\t\t\tmomentum_low = this.settings['BEARMOM'].thresholds.down;\n\t\t\t\n\t\t\t// ADX trend strength?\n\t\t\tif( sec > this.settings['SECMOM'].thresholds.up ) momentum_hi = momentum_hi + 15;\n\t\t\telse if( sec < this.settings['SECMOM'].thresholds.down ) momentum_low = momentum_low -5;\n\t\t\t\t\n\t\t\tif( momentum > momentum_hi ) this.short();\n\t\t\telse if( momentum < momentum_low ) this.long();\n\t\t\t\n\n\t\t}\n\n\t\t// BULL TREND\n\t\telse\n\t\t{\n\t\t\tvar momentum = ind.BULL_momentum.result;\n\t\t\tlet momentum_hi = this.settings['BULLMOM'].thresholds.up,\n\t\t\t\tmomentum_low = this.settings['BULLMOM'].thresholds.down;\n\n\t\t\t// ADX trend strength?\n\t\t\tif( sec > this.settings['SECMOM'].thresholds.up ) momentum_hi = momentum_hi + 5;\n\t\t\telse if( sec < this.settings['SECMOM'].thresholds.down ) momentum_low = momentum_low -5;\n\n\t\t\tif( momentum > momentum_hi ) this.short();\n\t\t\t  else if( momentum < momentum_low )  this.long();\n\t\t\t\n\t\t}\n\t\t\n\t\t// add adx low/high if debug\n\n\t\n\t}, // check()\n\t\n\t\n\t/* LONG */\n\tlong: function()\n\t{\n\t\tif( this.trend.direction !== 'up' ) // new trend? (only act on new trends)\n\t\t{\n\t\t\tthis.resetTrend();\n\t\t\tthis.trend.direction = 'up';\n\t\t\tthis.advice('long');\n\t\t\tif( this.debug ) log.info('Going long');\n\t\t}\n\t\t\n\t\tif( this.debug )\n\t\t{\n\t\t\tthis.trend.duration++;\n\t\t\tlog.info('Long since', this.trend.duration, 'candle(s)');\n\t\t}\n\t},\n\t\n\t\n\t/* SHORT */\n\tshort: function()\n\t{\n\t\t// new trend? 
(else do things)\n\t\tif( this.trend.direction !== 'down' )\n\t\t{\n\t\t\tthis.resetTrend();\n\t\t\tthis.trend.direction = 'down';\n\t\t\tthis.advice('short');\n\t\t\tif( this.debug ) log.info('Going short');\n\t\t}\n\t\t\n\t\tif( this.debug )\n\t\t{\n\t\t\tthis.trend.duration++;\n\t\t\tlog.info('Short since', this.trend.duration, 'candle(s)');\n\t\t}\n\t},\n\t\n\t\n\t/* END backtest */\n\tend: function()\n\t{\n\t\tlet seconds = ((new Date()- this.startTime)/1000),\n\t\t\tminutes = seconds/60,\n\t\t\tstr;\n\t\t\t\n\t\tminutes < 1 ? str = seconds.toFixed(2) + ' seconds' : str = minutes.toFixed(2) + ' minutes';\n\t\t\n\t\tlog.info('====================================');\n\t\tlog.info('Finished in ' + str);\n\t\tlog.info('====================================');\n\t\n\t\t// print stats and messages if debug\n\t\tif(this.debug)\n\t\t{\n\t\t\tlet stat = this.stat;\n\t\t\tlog.info('BEAR RSI low/high: ' + stat.bear.min + ' / ' + stat.bear.max);\n\t\t\tlog.info('BULL RSI low/high: ' + stat.bull.min + ' / ' + stat.bull.max);\n\t\t\tlog.info('ADX min/max: ' + stat.adx.min + ' / ' + stat.adx.max);\n\t\t}\n\t\t\n\t}\n\t\n};\n\nmodule.exports = strat;\n"
  },
  {
    "path": "strategy_parameters/BBRSI.toml",
    "content": "interval = 14\n\n[thresholds]\nlow = 40\nhigh = 40\npersistence = 9\n\n[bbands]\nTimePeriod = 20\nNbDevUp = 0.7\nNbDevDn = 0.7"
  },
  {
    "path": "strategy_parameters/DUAL_RSI_BULL_BEAR.toml",
    "content": "[RBB1]\nSMA_long = 1000\nSMA_short = 50\n#BULL\nBULL_RSI = 10\nBULL_RSI_high = 80\nBULL_RSI_low = 60\n#BEAR   \nBEAR_RSI = 15\nBEAR_RSI_high = 60\nBEAR_RSI_low = 20\n#ADX   \nADX = 3\nADX_high = 70\nADX_low = 50\n\n[RBB2]\nSMA_long = 1000\nSMA_short = 50\n#BULL\nBULL_RSI = 10\nBULL_RSI_high = 80\nBULL_RSI_low = 60\n#BEAR   \nBEAR_RSI = 15\nBEAR_RSI_high = 60\nBEAR_RSI_low = 20\n#ADX   \nADX = 3\nADX_high = 70\nADX_low = 50\n"
  },
  {
    "path": "strategy_parameters/HL_TS.toml",
    "content": "# Minimum volume needed to enter a trade\nMin24hUSDVolume = 500000\nRollingVolumeHours = 24\nCurrencyPrice = 9000\nCandleSize = 5\n\n# Do not enter in bearish trend\n# if Med>Slow (bullish) \n# else if Fast>Med (slightly bullish in bearish trend)\nSMA_Fast = 50\nSMA_Medium = 200\nSMA_Slow = 500\n\nDarvasPeriodSize = 24\nNoTradeResetPeriod = 3\n\nStopLossPercent = -5\nMaxLongPositionHours = 12\n\n[psar]\noptInStart = 0.0\noptInAcceleration = 0.25\noptInMaximum = 0.5\n"
  },
  {
    "path": "strategy_parameters/NEO.toml",
    "content": "# Source: https://raw.githubusercontent.com/gcobs0834/gekko/develop/config/strategies/NEO.toml\n# SMA Trends\nSMA_long = 150\nSMA_short = 40\n\n# BULL\nBULL_RSI = 10\nBULL_RSI_high = 80\nBULL_RSI_low = 50\n\n# IDLE\nIDLE_RSI = 12\nIDLE_RSI_high = 65\nIDLE_RSI_low = 39\n\n# BEAR\nBEAR_RSI = 15\nBEAR_RSI_high = 50\nBEAR_RSI_low = 25\n\n# ROC\nROC = 6\nROC_lvl = 0\n\n# BULL/BEAR is defined by the longer SMA trends\n# if SHORT over LONG = BULL\n# if SHORT under LONG = BEAR\n\n# ROC is the LENGHT (averaging)\n# Leave ROC_lvl at 0 otherwise Results are negative\n\n\n"
  },
  {
    "path": "strategy_parameters/NEObigjap.toml",
    "content": "# SETTINGS FOUND FOR NEO STRAT AT EPOCH 15.000;\n# INTERNAL BACKTESTS RETURNED GREAT SCORE;\nBEAR_RSI_low = 23.106060606060606\nIDLE_RSI_high = 59.68181818181818\nBEAR_RSI = 11.863636363636363\nBULL_RSI_low = 47.121212121212125\nSMA_short = 36.0\nIDLE_RSI = 11.527272727272727\nROC = 4.781818181818182\nIDLE_RSI_low = 34.154545454545456\nBULL_RSI_high = 84.12121212121212\nROC_lvl = 0.0\nBULL_RSI = 9.121212121212121\nBEAR_RSI_high = 42.878787878787875\nSMA_long = 145.0\n\n"
  },
  {
    "path": "strategy_parameters/PPO.toml",
    "content": "short = 12\nlong = 26\nsignal = 9\n\n[thresholds]\ndown = -0.025\nup = 0.025\npersistence = 2"
  },
  {
    "path": "strategy_parameters/RBB_ADX2_BB.toml",
    "content": "[ADX]\nadx = 3.0\nhigh = 50\n\n[BBands]\nNbDevDn = 2.0\nNbDevUp = 2.0\nTimePeriod = 20.0\n\n[BBtrend]\nbearPersistence = 16\nbullPersistence = 11\nlowerThreshold = 50\nupperThreshold = 86\n\n[BEAR]\nhigh = 60.0\nlow = 29.4\nmod_high = 1.5\nmod_low = -1.5\nrsi = 9.2\n\n[BULL]\nhigh = 90\nlow = 37\nmod_high = 6\nmod_low = -13.5\nrsi = 13.2\n\n[SMA]\nlong = 1000.0\nshort = 50.0"
  },
  {
    "path": "strategy_parameters/RSI_BULL_BEAR.toml",
    "content": "SMA_long = [800, 1000]\nSMA_short = 50\n\nBULL_RSI = 10\nBULL_RSI_high = 80\nBULL_RSI_low = 60\n        \nBEAR_RSI = 15\nBEAR_RSI_high = 60\nBEAR_RSI_low = 20\n        \nADX = 3\nADX_high = 70\nADX_low = 50\n"
  },
  {
    "path": "strategy_parameters/RSI_BULL_BEAR_ADX.toml",
    "content": "SMA_long = 1000\nSMA_short = 50\n\nBULL_RSI = 10\nBULL_RSI_high = 80\nBULL_RSI_low = 60\n        \nBEAR_RSI = 15\nBEAR_RSI_high = 60\nBEAR_RSI_low = 20\n        \nADX = 3\nADX_high = 70\nADX_low = 50\n\nshort = 10\nlow = 80\nmod = 20\n\n"
  },
  {
    "path": "strategy_parameters/RSI_BULL_BEAR_x2.toml",
    "content": "# MAJOR SMA TRENDS\nMAJOR_SMA_long = 2000\nMAJOR_SMA_short = 500\n\n# MAJOR BULL TREND\n# SMA Trends\nBULL__SMA_long = 1000\nBULL__SMA_short = 50\n# BULL\nBULL__BULL_RSI = 10\nBULL__BULL_RSI_high = 80\nBULL__BULL_RSI_low = 60\n# BEAR\nBULL__BEAR_RSI = 15\nBULL__BEAR_RSI_high = 50\nBULL__BEAR_RSI_low = 20\n\n# MAJOR BEAR TREND\n# SMA Trends\nBEAR__SMA_long = 1000\nBEAR__SMA_short = 50\n# BULL\nBEAR__BULL_RSI = 10\nBEAR__BULL_RSI_high = 80\nBEAR__BULL_RSI_low = 60\n# BEAR\nBEAR__BEAR_RSI = 15\nBEAR__BEAR_RSI_high = 50\nBEAR__BEAR_RSI_low = 20\n\n"
  },
  {
    "path": "strategy_parameters/WRSI_BULL_BEAR.toml",
    "content": "SMA_long = 1000\nSMA_short = 50\n\nBULL_RSI = 10\nBULL_RSI_high = 80\nBULL_RSI_low = 60\n        \nBEAR_RSI = 15\nBEAR_RSI_high = 60\nBEAR_RSI_low = 20\n        \nADX = 3\nADX_high = 70\nADX_low = 50\n"
  },
  {
    "path": "strategy_parameters/foxhole.toml",
    "content": "P0 = [-65.536, 65.536]\nP1 = [-65.536, 65.536]\n"
  },
  {
    "path": "strategy_parameters/griewangk.toml",
    "content": "P0 = [-600, 600]\nP1 = [-600, 600]\nP2 = [-600, 600]\nP3 = [-600, 600]\nP4 = [-600, 600]\nP5 = [-600, 600]\nP6 = [-600, 600]\nP7 = [-600, 600]\nP8 = [-600, 600]\nP9 = [-600, 600]\n"
  },
  {
    "path": "strategy_parameters/quartic.toml",
    "content": "P0 = [-1.28, 1.28]\nP1 = [-1.28, 1.28]\nP2 = [-1.28, 1.28]\nP3 = [-1.28, 1.28]\nP4 = [-1.28, 1.28]\nP5 = [-1.28, 1.28]\nP6 = [-1.28, 1.28]\nP7 = [-1.28, 1.28]\nP8 = [-1.28, 1.28]\nP9 = [-1.28, 1.28]\nP10 = [-1.28, 1.28]\nP11 = [-1.28, 1.28]\nP12 = [-1.28, 1.28]\nP13 = [-1.28, 1.28]\nP14 = [-1.28, 1.28]\nP15 = [-1.28, 1.28]\nP16 = [-1.28, 1.28]\nP17 = [-1.28, 1.28]\nP18 = [-1.28, 1.28]\nP19 = [-1.28, 1.28]\nP20 = [-1.28, 1.28]\nP21 = [-1.28, 1.28]\nP22 = [-1.28, 1.28]\nP23 = [-1.28, 1.28]\nP24 = [-1.28, 1.28]\nP25 = [-1.28, 1.28]\nP26 = [-1.28, 1.28]\nP27 = [-1.28, 1.28]\nP28 = [-1.28, 1.28]\nP29 = [-1.28, 1.28]\n"
  },
  {
    "path": "strategy_parameters/rastrigin.toml",
    "content": "P0 = [-5.12, 5.12]\nP1 = [-5.12, 5.12]\nP2 = [-5.12, 5.12]\nP3 = [-5.12, 5.12]\nP4 = [-5.12, 5.12]\nP5 = [-5.12, 5.12]\nP6 = [-5.12, 5.12]\nP7 = [-5.12, 5.12]\nP8 = [-5.12, 5.12]\nP9 = [-5.12, 5.12]\nP10 = [-5.12, 5.12]\nP11 = [-5.12, 5.12]\nP12 = [-5.12, 5.12]\nP13 = [-5.12, 5.12]\nP14 = [-5.12, 5.12]\nP15 = [-5.12, 5.12]\nP16 = [-5.12, 5.12]\nP17 = [-5.12, 5.12]\nP18 = [-5.12, 5.12]\nP19 = [-5.12, 5.12]\n"
  },
  {
    "path": "strategy_parameters/rosenbrock.toml",
    "content": "P0 = [-2.048, 2.048]\nP1 = [-2.048, 2.048]\n"
  },
  {
    "path": "strategy_parameters/scalperNEO.toml",
    "content": "# SMA Trends\nSMA_long = 150\nSMA_short = 40\n\n# BULL\nBULL_RSI = 10\nBULL_RSI_high = 80\nBULL_RSI_low = 50\n\n# IDLE\nIDLE_RSI = 12\nIDLE_RSI_high = 65\nIDLE_RSI_low = 39\n\n# BEAR\nBEAR_RSI = 15\nBEAR_RSI_high = 50\nBEAR_RSI_low = 25\n\n# ROC\nROC = 6\nROC_lvl = 0\nscalperDelay = 7\nscalperThresholdPercent = 2\n"
  },
  {
    "path": "strategy_parameters/scalperRBBA.toml",
    "content": "SMA_long = 1000\nSMA_short = 50\n\nBULL_RSI = 10\nBULL_RSI_high = 80\nBULL_RSI_low = 60\n        \nBEAR_RSI = 15\nBEAR_RSI_high = 60\nBEAR_RSI_low = 20\n        \nADX = 3\nADX_high = 70\nADX_low = 50\n\nscalperDelay = 7\nscalperThresholdPercent = 2\n"
  },
  {
    "path": "strategy_parameters/schwefel.toml",
    "content": "P0 = [-500, 500]\nP1 = [-500, 500]\nP2 = [-500, 500]\nP3 = [-500, 500]\nP4 = [-500, 500]\nP5 = [-500, 500]\nP6 = [-500, 500]\nP7 = [-500, 500]\nP8 = [-500, 500]\nP9 = [-500, 500]\n"
  },
  {
    "path": "utilities/importer.sh",
    "content": "#!/bin/bash\n\n# To run GAs one needs candlestick datasets to backtest.\n# Grabbing that data on a VPS can be a pain, so thats an automated tool that grabs some interesting datasets;\n\nGekkoPath=\"${HOME}/gekko\"\njaponicusRelativeToGekko=\"../japonicus\"\nconfigs=($(ls|grep \".js\"))\necho $configs\n\nfor conf in \"${configs[@]}\"\ndo\nnode ${GekkoPath}/gekko.js -i -c ${japonicusRelativeToGekko}/utilities/${conf}\ndone\n\n\n\n"
  },
  {
    "path": "utilities/poloUSDTBTC.js",
    "content": "// Everything is explained here:\n// @link https://gekko.wizb.it/docs/commandline/plugins.html\n\nvar config = {};\n\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//                          GENERAL SETTINGS\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nconfig.trader={};\nconfig.debug = true; // for additional logging / debugging\n\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//                         WATCHING A MARKET\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nconfig.watch = {\n\n  // see https://gekko.wizb.it/docs/introduction/supported_exchanges.html\n  exchange: 'poloniex',\n  currency: 'USDT',\n  asset: 'BTC',\n\n }\n\nconfig.adapter = 'sqlite';\n\nconfig.sqlite = {\n  path: 'plugins/sqlite',\n\n  dataDirectory: 'history',\n  version: 0.1,\n\n  journalMode: 'WAL', // setting this to 'DEL' may prevent db locking on windows\n\n  dependencies: []\n}\n\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//                       CONFIGURING IMPORTING\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nconfig.importer = {\n  daterange: {\n    // NOTE: these dates are in UTC\n    from: \"2017-01-01 00:00:00\"\n  }\n}\n\nconfig.candleWriter = {\n\tenabled: true,\n}\n\n// set this to true if you understand that Gekko will\n// invest according to how you configured the indicators.\n// None of the advice in the output is Gekko telling you\n// to take a certain position. Instead it is the result\n// of running the indicators you configured automatically.\n//\n// In other words: Gekko automates your trading strategies,\n// it doesn't advice on itself, only set to true if you truly\n// understand this.\n//\n// Not sure? Read this first: https://github.com/askmike/gekko/issues/201\nconfig['I understand that Gekko only automates MY OWN trading strategies'] = false;\n\nmodule.exports = config;\n"
  },
  {
    "path": "utilities/poloUSDTETH.js",
    "content": "// Everything is explained here:\n// @link https://gekko.wizb.it/docs/commandline/plugins.html\n\nvar config = {};\n\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//                          GENERAL SETTINGS\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nconfig.trader={};\nconfig.debug = true; // for additional logging / debugging\n\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//                         WATCHING A MARKET\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nconfig.watch = {\n\n  // see https://gekko.wizb.it/docs/introduction/supported_exchanges.html\n  exchange: 'poloniex',\n  currency: 'USDT',\n  asset: 'ETH',\n\n }\n\nconfig.adapter = 'sqlite';\n\nconfig.sqlite = {\n  path: 'plugins/sqlite',\n\n  dataDirectory: 'history',\n  version: 0.1,\n\n  journalMode: 'WAL', // setting this to 'DEL' may prevent db locking on windows\n\n  dependencies: []\n}\n\n\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//                       CONFIGURING IMPORTING\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nconfig.importer = {\n  daterange: {\n    // NOTE: these dates are in UTC\n    from: \"2017-01-01 00:00:00\"\n  }\n}\n\nconfig.candleWriter = {\n\tenabled: true,\n}\n\n// set this to true if you understand that Gekko will\n// invest according to how you configured the indicators.\n// None of the advice in the output is Gekko telling you\n// to take a certain position. Instead it is the result\n// of running the indicators you configured automatically.\n//\n// In other words: Gekko automates your trading strategies,\n// it doesn't advice on itself, only set to true if you truly\n// understand this.\n//\n// Not sure? Read this first: https://github.com/askmike/gekko/issues/201\nconfig['I understand that Gekko only automates MY OWN trading strategies'] = false;\n\nmodule.exports = config;\n"
  },
  {
    "path": "utilities/poloUSDTLTC.js",
    "content": "// Everything is explained here:\n// @link https://gekko.wizb.it/docs/commandline/plugins.html\n\nvar config = {};\n\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//                          GENERAL SETTINGS\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nconfig.trader={};\nconfig.debug = true; // for additional logging / debugging\n\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//                         WATCHING A MARKET\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nconfig.watch = {\n\n  // see https://gekko.wizb.it/docs/introduction/supported_exchanges.html\n  exchange: 'poloniex',\n  currency: 'USDT',\n  asset: 'LTC',\n\n }\n\n\nconfig.adapter = 'sqlite';\n\nconfig.sqlite = {\n  path: 'plugins/sqlite',\n\n  dataDirectory: 'history',\n  version: 0.1,\n\n  journalMode: 'WAL', // setting this to 'DEL' may prevent db locking on windows\n\n  dependencies: []\n}\n\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n//                       CONFIGURING IMPORTING\n// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nconfig.importer = {\n  daterange: {\n    // NOTE: these dates are in UTC\n    from: \"2017-01-01 00:00:00\"\n  }\n}\n\nconfig.candleWriter = {\n\tenabled: true,\n}\n\n// set this to true if you understand that Gekko will\n// invest according to how you configured the indicators.\n// None of the advice in the output is Gekko telling you\n// to take a certain position. Instead it is the result\n// of running the indicators you configured automatically.\n//\n// In other words: Gekko automates your trading strategies,\n// it doesn't advice on itself, only set to true if you truly\n// understand this.\n//\n// Not sure? Read this first: https://github.com/askmike/gekko/issues/201\nconfig['I understand that Gekko only automates MY OWN trading strategies'] = false;\n\nmodule.exports = config;\n"
  },
  {
    "path": "version.py",
    "content": "#!/bin/python\nVERSION = 0.92\n"
  }
]