Repository: Gab0/japonicus
Branch: master
Commit: af4aaf74f8b2
Files: 104
Total size: 179.0 KB
Directory structure:
gitextract_koczj845/
├── .gitignore
├── Dockerfile
├── LICENSE
├── README.md
├── docker-compose.yaml
├── evaluation/
│ ├── __init__.py
│ ├── benchmark/
│ │ ├── __init__.py
│ │ ├── benchmark.py
│ │ └── generateConfig.py
│ └── gekko/
│ ├── API.py
│ ├── __init__.py
│ ├── backtest.py
│ ├── dataset.py
│ ├── datasetOperations.py
│ └── statistics.py
├── exchangerun.csv
├── gekko_evolution.yml
├── japonicus/
│ ├── Settings.py
│ ├── __init__.py
│ ├── configIndicators.py
│ ├── configStrategies.py
│ ├── evolution_generations.py
│ ├── halt.py
│ ├── interface.py
│ ├── japonicus.py
│ └── options.py
├── japonicus-run
├── jlivetrader.py
├── livetrader/
│ ├── exchangeMonitor.py
│ ├── gekkoChecker.py
│ ├── gekkoTrigger.py
│ ├── japonicusResultSelector.py
│ └── strategyRanker.py
├── promoterz/
│ ├── README.md
│ ├── TOMLutils.py
│ ├── __init__.py
│ ├── environment.py
│ ├── evaluationBreak.py
│ ├── evaluationPool.py
│ ├── evolutionHooks.py
│ ├── evolutionToolbox.py
│ ├── functions.py
│ ├── locale.py
│ ├── logAnalysis.py
│ ├── logger.py
│ ├── metaPromoterz.py
│ ├── parameterOperations.py
│ ├── representation/
│ │ ├── Creator.py
│ │ ├── chromosome.py
│ │ ├── deapCreator.py
│ │ └── oldschool.py
│ ├── sequence/
│ │ ├── __init__.py
│ │ ├── locale/
│ │ │ └── standard_loop.py
│ │ └── world/
│ │ └── parallel_world.py
│ ├── statistics.py
│ ├── supplement/
│ │ ├── PRoFIGA.py
│ │ ├── age.py
│ │ └── phenotypicDivergence.py
│ ├── validation.py
│ ├── webServer/
│ │ ├── __init__.py
│ │ ├── core.py
│ │ ├── external_css_list.txt
│ │ ├── graphs.py
│ │ ├── layout.py
│ │ └── promoterz_style.css
│ └── world.py
├── requirements.txt
├── settings/
│ ├── _Global.toml
│ ├── _backtest.toml
│ ├── _bayesian.toml
│ ├── _binance.toml
│ ├── _dataset.toml
│ ├── _evalbreak.toml
│ └── _generation.toml
├── stratego/
│ ├── README.md
│ ├── __init__.py
│ ├── gekko_strategy.py
│ ├── indicator_properties.py
│ └── skeleton/
│ ├── dumbsum.js
│ └── ontrend.js
├── strategy_parameters/
│ ├── BBRSI.toml
│ ├── DUAL_RSI_BULL_BEAR.toml
│ ├── HL_TS.toml
│ ├── NEO.toml
│ ├── NEObigjap.toml
│ ├── PPO.toml
│ ├── RBB_ADX2_BB.toml
│ ├── RSI_BULL_BEAR.toml
│ ├── RSI_BULL_BEAR_ADX.toml
│ ├── RSI_BULL_BEAR_x2.toml
│ ├── WRSI_BULL_BEAR.toml
│ ├── foxhole.toml
│ ├── griewangk.toml
│ ├── quartic.toml
│ ├── rastrigin.toml
│ ├── rosenbrock.toml
│ ├── scalperNEO.toml
│ ├── scalperRBBA.toml
│ └── schwefel.toml
├── utilities/
│ ├── importer.sh
│ ├── poloUSDTBTC.js
│ ├── poloUSDTETH.js
│ └── poloUSDTLTC.js
└── version.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
### https://raw.github.com/github/gitignore/1a92ca87b787b6a0c68eb10371ebd0bfd823db9b/Global/Windows.gitignore
# Windows image file caches
Thumbs.db
ehthumbs.db
# Folder config file
Desktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msm
*.msp
# Windows shortcuts
*.lnk
### https://raw.github.com/github/gitignore/1a92ca87b787b6a0c68eb10371ebd0bfd823db9b/Global/Linux.gitignore
*~
# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
# KDE directory preferences
.directory
# Linux trash folder which might appear on any partition or disk
.Trash-*
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
### https://raw.github.com/github/gitignore/1a92ca87b787b6a0c68eb10371ebd0bfd823db9b/Global/macOS.gitignore
*.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### https://raw.github.com/github/gitignore/1a92ca87b787b6a0c68eb10371ebd0bfd823db9b/Global/Emacs.gitignore
# -*- mode: gitignore; -*-
*~
\#*\#
/.emacs.desktop
/.emacs.desktop.lock
*.elc
auto-save-list
tramp
.\#*
# Org-mode
.org-id-locations
*_archive
# flymake-mode
*_flymake.*
# eshell files
/eshell/history
/eshell/lastdir
# elpa packages
/elpa/
# reftex files
*.rel
# AUCTeX auto folder
/auto/
# cask packages
.cask/
dist/
# Flycheck
flycheck_*.el
# server auth directory
/server/
# projectiles files
.projectile
# directory configuration
.dir-locals.el
### https://raw.github.com/github/gitignore/1a92ca87b787b6a0c68eb10371ebd0bfd823db9b/Global/Vim.gitignore
# swap
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
# session
Session.vim
# temporary
.netrwhist
*~
# auto-generated tag files
tags
### https://raw.github.com/github/gitignore/1a92ca87b787b6a0c68eb10371ebd0bfd823db9b/Global/Tags.gitignore
# Ignore tags created by etags, ctags, gtags (GNU global) and cscope
TAGS
.TAGS
!TAGS/
tags
.tags
!tags/
gtags.files
GTAGS
GRTAGS
GPATH
GSYMS
cscope.files
cscope.out
cscope.in.out
cscope.po.out
### https://raw.github.com/github/gitignore/1a92ca87b787b6a0c68eb10371ebd0bfd823db9b/python.gitignore
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# dotenv
.env
# virtualenv
.venv/
venv/
ENV/
# Spyder project settings
.spyderproject
# Rope project settings
.ropeproject
### gekkoJaponicus
output
src
================================================
FILE: Dockerfile
================================================
FROM python:3.6.6-jessie
ENV LANG en_US.UTF-8

# install dependencies;
#RUN apt-get update -y
#RUN apt-get install software-properties-common python-software-properties -y
RUN apt-get update -y
RUN apt-get upgrade -y
RUN apt-get install python3-pip python3-numpy -y
RUN pip3.6 install --upgrade pip

# copy requirements first so the dependency layers cache across code changes;
COPY ./requirements.txt /opt/japonicus/requirements.txt
# those are required to build other python modules, so install first;
RUN pip3.6 install numpy cython pandas
RUN pip3.6 install --ignore-installed -r /opt/japonicus/requirements.txt

WORKDIR /opt/japonicus/
COPY . /opt/japonicus
EXPOSE 5000
RUN python3.6 --version

ENTRYPOINT ["python3.6", "/opt/japonicus/japonicus-run"]
# bug fix: CMD in exec form is appended to ENTRYPOINT, so it must hold only
# the default arguments. The previous full python invocation was passed to
# japonicus-run as bogus positional arguments.
CMD ["--help"]
================================================
FILE: LICENSE
================================================
The MIT License (MIT)
Copyright (c) 2014-2017 Mike van Rossum mike@mvr.me
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
================================================
FILE: README.md
================================================
### What is japonicus and what it does
This is an implementation of genetic algorithm & bayesian evolution to develop strategies for digital coin trading bot Gekko.
So you make a good strat, or get one from the internetz. Make sure it's good, because this is not about miracles.
If you get good profit on strat standard settings or some random settings you made up, japonicus can find some setting set that improves the strategy, on some specific market/currency or overall.
Discord Group: `https://discord.gg/kYKHXnV`
## Instructions
Japonicus works on `python>=3.6`!
Check wiki for instructions on setup, workflow, methods, etc.
## Disclaimer
No matter how many years your training candles span or how convoluted or simple is your strategy,
your strategies/parameters that were profitable on backtest runs probably won't translate well to live trading.
Although japonicus is a fairly competent GA and will find a capable set of parameters in a few epochs,
we're yet to discover why the live trading runs seem an environment completely unrelated to backtest env, where
strategies/parameters that seemed good offline turn into live daily losses or below market gains.
You can send me a few coins to help me develop some ideas for binance trading bots,
as this kind of research involves a steady negative profit so I cannot sustain it indefinitely...
If something starts working I will share here or in our discord group. Those are my binance wallets:
```
LTC LVhThMzJMC6aKBcA1KX4q3yk2ryBjfPGfH
ETH 0xceaa9bb655ed80ba36b55532fdd6e11e6e5b681b
```
## User Feedback
You all users of japonicus should report notable runs under an issue.
If some strat seems to be viable, send feedback so users can have a better point of entry for their own runs.
## Future
Genetic Algorithms are a good way to fetch a good set of settings to run a strategy on gekko.
But the real gamechanger is the strategy itself.
The ideal evolution method would be a Genetic Programming that modifies strategy logic.
This somewhat corresponds to `--skeleton` mode of japonicus, which lets the GA select indicators on a base strategy.
# Changelog
The changelog is important for the user as following modifications can cause bugs on related areas. Please report 'em ;)
```
v0.92
- Moving all gekko related functions to evaluation.gekko module. The purpose is making japonicus a general purpose
GA framework.
v0.91
- the evolution candle date ranges are now defined by given area in the map, instead of attached at each locale.
v0.90
- web interface reworked - now it is the recommended method to run the ga's.
- locale creation/destruction chances updated.
- bayesian evolution method deprecated.
v0.80
- supports gekko v0.6.X (only).
- Dockerfile and docker-compose methods revisited.
- automatic filter for multiple remote gekko urls (urls defined inside settings/global)
- live trading bot watcher at `jlivetrader.py`. For binance only, undocumented and experimental.
v0.70
- log folder restructured
- configStrategies.py DEPRECATED; use only TOML parameters at the folder strategy_parameters.
check TOML special syntax for parameter ranges at the wiki
- GA benchmark mode added
- Settings.py refactor
- Roundtrip exposure time filter
v0.58
- runs in Windows (not confirmed)
- Settings parameters can be passed on command line (check the --help)
- Multiple evolution datasets can be passed. `@Settings.py:dataset ->
dataset_source is the first, add dataset_source1; dataset_source2 and so forth
for multiple datasets.`
- filter individues for minimum trade count (default: enabled@16 trades)
- backtest scores (profit and sharpe) to individue final score method is now a sum, not multiplication
v0.56
- japonicus settings for strategies can be stored at strategy_parameters folder as .toml files
- automated refactor on entire codebase
- wiki is online, check it for instructions.
- various bugfixes
- log & results improvements
- daterange for locales now on locale logs (.csv)
- statistics methods remade.
v0.54
- Variation of Backtest result interpretation. check Settings.py -> genconf.interpreteBacktestProfit
- Focus on selecting best individues. Periodic evaluation on more candidates. Bugfixes on that department.
- Result interface actually readable.
- Log better structured, with the summary at the top.
- Small clarifications on code.
v0.53
- Major aesthetics rework on code itself; now we can even have collaborators.
- Pretty run logs @ logs folder;
- Interchangeable backtest result interpretation (promoterz.evaluation.gekko:25)
- gekko API is now organized - backtest & dataset functions separated.
- Genetic Algorithm settings controllable via command line. Check --help.
- Web interface more stable
v0.51
- Started tracking updates on changelog;
```
================================================
FILE: docker-compose.yaml
================================================
version: '3'
services:
gekko:
image: gekko
volumes:
- gekko-dir:/usr/src/app
ports:
- "3000:3000"
japonicus:
command: $JARGS
image: japonicus
build: .
volumes:
- gekko-dir:/root/gekko
ports:
- "5000:5000"
depends_on:
- gekko
volumes:
gekko-dir:
================================================
FILE: evaluation/__init__.py
================================================
#!/bin/python
from .import gekko
from . import benchmark
================================================
FILE: evaluation/benchmark/__init__.py
================================================
#!/bin/python
from . import benchmark
================================================
FILE: evaluation/benchmark/benchmark.py
================================================
#!/bin/python
# source https://www.researchgate.net/publication/27382766_On_benchmarking_functions_for_genetic_algorithm
import random
import math
def evalRosenbrock(parameters):
    """Negated 2-D Rosenbrock function; its maximum (0) sits at x = y = 1."""
    x, y = parameters[0], parameters[1]
    penalty = (1 - x) ** 2
    penalty += 100 * (x * x - y) ** 2
    return -penalty
def evalGriewangk(parameters):
    """Negated 10-D Griewangk function; its maximum (0) sits at the origin.

    f(x) = 1 + sum(x_i^2 / 4000) - prod(cos(x_i / sqrt(i))), i starting at 1.

    Bug fix: the product term previously computed cos(x_i) / sqrt(i) instead
    of cos(x_i / sqrt(i)), which is not the Griewangk function from the
    benchmark literature cited at the top of this file.
    """
    Dimensions = 10
    Result = 1
    for w in range(Dimensions):
        Result += pow(parameters[w], 2) / 4000
    COSs = 1
    for z in range(Dimensions):
        Z = z + 1
        COSs *= math.cos(parameters[z] / math.sqrt(Z))
    Result -= COSs
    return -Result
def evalRastrigin(parameters):
    """Negated 20-D Rastrigin function; its maximum (0) sits at the origin."""
    dimensions = 20
    total = 10 * dimensions
    for value in parameters[:dimensions]:
        # accumulate the quadratic and cosine terms separately, per element
        total += value * value
        total -= 10 * math.cos(2 * math.pi * value)
    return -total
def evalSchwefel(parameters):
    """Negated 10-D Schwefel function, offset by 10 * 4189.829101 so the
    known optimum lands near zero."""
    offset = 10 * 4189.829101
    total = offset
    for value in parameters[:10]:
        total -= value * math.sin(math.sqrt(abs(value)))
    return -total
def evalQuartic(parameters):
    """Negated 30-D noisy quartic function: each term is weighted by its
    1-based index and perturbed with fresh unit-variance Gaussian noise."""
    total = 0
    for index in range(30):
        weight = index + 1
        total += weight * pow(parameters[index], 4) + random.gauss(0, 1)
    return -total
def evalFoxHole(parameters):
    """Negated Shekel's Foxholes function (2 parameters, 25 foxholes).

    The known optimum is ~0.998003837794449 at (-32, -32), so the best
    achievable return value here is ~ -0.998.
    """
    # foxhole coordinates on a 5x5 grid: x repeats the row pattern,
    # y repeats each value five times
    xCoords = [-32, -16, 0, 16, 32] * 5
    yCoords = [value for value in (-32, -16, 0, 16, 32) for _ in range(5)]
    holes = [xCoords, yCoords]
    inverseSum = 0.002
    for w in range(25):
        denominator = 1 + w
        for k in range(2):
            denominator += pow(parameters[k] - holes[k][w], 6)
        inverseSum += 1 / denominator
    return -(1 / inverseSum)
def Evaluate(genconf, phenotype):
    """Score a phenotype against the benchmark function it names.

    phenotype maps a single benchmark-function name to its parameter dict;
    parameters are passed in sorted-key order. genconf is accepted for
    interface parity with the gekko evaluator but unused here. Returns a
    gekko-compatible result dict so benchmark runs plug into the regular
    evolution pipeline.
    """
    dispatch = {
        'quartic': evalQuartic,
        'foxhole': evalFoxHole,
        'schwefel': evalSchwefel,
        'rastrigin': evalRastrigin,
        'griewangk': evalGriewangk,
        'rosenbrock': evalRosenbrock,
    }
    functionName = list(phenotype.keys())[0]
    parameterMap = phenotype[functionName]
    orderedParameters = [parameterMap[key] for key in sorted(list(parameterMap.keys()))]
    score = dispatch[functionName](orderedParameters)
    return {
        'relativeProfit': score,
        'sharpe': 0,
        'trades': 25,
        'averageExposure': 0,
    }
================================================
FILE: evaluation/benchmark/generateConfig.py
================================================
#!/bin/python
"""Generate a benchmark parameter file (config.toml) mapping P0..Pn to a
shared value range, for use with the GA benchmark mode."""
import pytoml

# Alternative benchmark parameter spaces (pick one pair below):
# quartic:    NBP = 30; PRANGE = [-1.28, 1.28]
# foxhole:    NBP = 25; PRANGE = [-65536, 65536]
# schwefel:   NBP = 10; PRANGE = [-500, 500]
# griewangk:  NBP = 10; PRANGE = [-600, 600]
# rosenbrock: NBP = 2;  PRANGE = [-2.048, 2.048]

# Active selection: 20 parameters in the rastrigin range.
NBP = 20
PRANGE = [-5.12, 5.12]

PARAMETERS = {}
for P in range(NBP):
    PNAME = 'P%i' % P
    PARAMETERS.update({PNAME: PRANGE})

TOMLTEXT = pytoml.dumps(PARAMETERS)
# bug fix: use a context manager so the file handle is closed deterministically
with open('config.toml', 'w') as configFile:
    configFile.write(TOMLTEXT)
================================================
FILE: evaluation/gekko/API.py
================================================
#!/bin/python
import os
import requests
import json
from subprocess import Popen, PIPE
def initializeGekko(gekkoDIR=None):  # not used yet.
    """Spawn a local gekko UI server as a child process and return its Popen
    handle.

    Bug fix: the original referenced an undefined global `gekkoDIR` and
    would always raise NameError; the directory is now an explicit
    parameter. Raises ValueError when no directory is given.
    """
    if gekkoDIR is None:
        raise ValueError("gekkoDIR must be provided to launch gekko")
    CMD = ['node', gekkoDIR + '/gekko', '--ui']
    return Popen(CMD, stdin=PIPE, stdout=PIPE, stderr=PIPE)
def checkInstance(instanceUrl):
    """Return True when a gekko instance answers at instanceUrl with a
    non-empty body, False otherwise.

    Bug fix: the original fell through and returned None implicitly when the
    response body was empty; the result is now always an explicit bool.
    """
    try:
        Request = requests.get(instanceUrl)
    except Exception:
        # connection refused / DNS failure / timeout: no instance there
        return False
    return bool(Request.text)
def httpPost(URL, data={}, Verbose=True):
    # POST `data` as JSON to URL and return the decoded JSON response, or
    # False on any failure. The mutable default `data={}` is never mutated
    # here, so it is safe.
    try:
        Request = requests.post(URL, json=data)
        Response = json.loads(Request.text)
    except ConnectionRefusedError:
        # a dead local gekko is fatal: report and terminate the process
        # NOTE(review): requests usually wraps refused connections in
        # requests.exceptions.ConnectionError, so this branch may rarely
        # trigger -- confirm against the requests version in use.
        print("Error: Gekko comm error! Check your local Gekko instance.")
        exit()
    except Exception as e:
        # any other failure (HTTP error, invalid JSON, timeout) is reported
        # when Verbose and signalled to the caller with False
        if Verbose:
            print("Error: config failure")
            print(e)
            print(URL)
            print(data)
        return False
    return Response
def loadHostsFile(HostsFilePath):
    """Read a hosts file and build gekko instance URLs from its entries.

    Blank lines and lines containing '=' or '[' (ini-style keys and section
    headers) are skipped; every remaining line is treated as a hostname and
    mapped to http://<host>:3000.

    Returns an empty list when the file does not exist.
    """
    remoteGekkos = []
    if os.path.isfile(HostsFilePath):
        # context manager closes the handle (the original leaked it)
        with open(HostsFilePath) as hostsFile:
            entries = hostsFile.read().split('\n')
        for entry in entries:
            if entry and '=' not in entry and '[' not in entry:
                remoteGekkos.append("http://%s:3000" % entry)
    return remoteGekkos
================================================
FILE: evaluation/gekko/__init__.py
================================================
#!/bin/python
import os
import subprocess
from .import API
from .import dataset
from .import backtest
from .import datasetOperations
from .statistics import *
import pathlib
import promoterz
class GekkoEvaluator():
    """Placeholder for a gekko-backed evaluator object; not implemented yet."""

    def __init__(self):
        # no state to initialize; kept so the class can be instantiated
        pass
# Names of the TOML settings files (under settings/) that gekko-based runs
# load; "indicator" is currently disabled.
SettingsFiles = [
    "generation",
    "Global",
    "dataset",
    #"indicator",
    "backtest",
    "evalbreak"
]
def showBacktestResult(backtestResult, dataset=None):
    """Format a one-line summary of a backtest result (profit, trade count,
    sharpe), optionally appending the dataset's date range and market."""
    template = (
        'Test on random candles... '
        'relativeProfit: %.3f \t'
        'nbTrades: %.1f\t'
        'sharpe: %.2f'
    )
    summary = template % (
        backtestResult['relativeProfit'],
        backtestResult['trades'],
        backtestResult['sharpe'],
    )
    if dataset:
        summary += "\n\t\t%s\t%s" % (dataset.textDaterange(),
                                     dataset.textSpecifications())
    return summary
def parseDatasetInfo(purpose, candlestickDataset):
    """Build a human-readable description of a candlestick dataset, labelled
    with its purpose ('evolution' or 'evaluation').

    Also prints a blank separator line as a side effect, kept from the
    original implementation.
    """
    daterangeText = datasetOperations.dateRangeToText(
        candlestickDataset.daterange)
    print()
    description = "\n%s candlestick dataset %s\n" % (purpose, daterangeText)
    description += candlestickDataset.textSpecifications() + '\n'
    return description
def showPrimaryInfo(Logger, evolutionDatasets, evaluationDatasets):
    """Log descriptions of every evolution and (optional) evaluation dataset
    to the run log's header section."""
    for dataset in evolutionDatasets:
        Logger.log(parseDatasetInfo("evolution", dataset), target="Header")
    if evaluationDatasets:
        for dataset in evaluationDatasets:
            Logger.log(parseDatasetInfo("evaluation", dataset),
                       target="Header")
class GekkoEvaluationPool(promoterz.evaluationPool.EvaluationPool):
    """Evaluation pool that spreads individuals across gekko instances,
    weighting each instance by the inverse of its last evaluation time."""

    def ejectURL(self, Index):
        # Drop a gekko instance and all per-instance bookkeeping tied to it.
        self.Urls.pop(Index)
        self.lasttimes.pop(Index)
        self.lasttimesperind.pop(Index)
        self.poolsizes.pop(Index)

    def distributeIndividuals(self, tosimulation):
        """Split `tosimulation` into one chunk per gekko instance.

        Faster instances (smaller last evaluation times) receive
        proportionally more individuals; before any timing data exists the
        load is split evenly. Bug fix: removed an unused selector lambda
        that referenced the `random` module, which this module never imports
        (a latent NameError).
        """
        nb_simulate = len(tosimulation)
        sumtimes = sum(self.lasttimes)
        std = nb_simulate / len(self.Urls)
        if sumtimes:
            # weight each instance by its speed: 1 / last evaluation time
            vels = [1 / x for x in self.lasttimes]
            constant = nb_simulate / sum(vels)
            proportions = [max(1, x * constant) for x in vels]
        else:
            proportions = [std for x in self.Urls]
        proportions = [int(round(x)) for x in proportions]
        pB = lambda x: x.index(min(x))
        pM = lambda x: x.index(max(x))
        # rounding can leave the total off; nudge the smallest/largest
        # shares until the counts match exactly
        while sum(proportions) < nb_simulate:
            proportions[pB(proportions)] += 1
            print('+')
        while sum(proportions) > nb_simulate:
            proportions[pM(proportions)] -= 1
            print('-')
        print(proportions)
        assert (sum(proportions) == nb_simulate)
        # slice the individual list according to the computed proportions
        distribution = []
        L = 0
        for P in proportions:
            distribution.append(tosimulation[L: L + P])
            L = L + P
        return distribution


# backwards-compatible module-level alias used by callers
EvaluationPool = GekkoEvaluationPool
def ResultToIndividue(result, individue):
    """Copy a backtest result onto an individual: fitness becomes the
    (relativeProfit, sharpe) pair, trades are stored as-is, and average
    exposure is converted from milliseconds to hours (/ 3600000)."""
    profit = result['relativeProfit']
    sharpe = result['sharpe']
    individue.fitness.values = (profit, sharpe)
    individue.trades = result['trades']
    individue.averageExposure = result['averageExposure'] / 3600000
def showIndividue(evaldata):
    """One-line summary of an individual's evaluation: best profit, sharpe
    and trade count."""
    template = "~ bP: %.3f\tS: %.3f\tnbT:%.3f"
    return template % (
        evaldata['relativeProfit'],
        evaldata['sharpe'],
        evaldata['trades'],
    )
def validateSettings(settings):
    # Probe every configured gekko URL and keep only the reachable ones in
    # settings['Global']['GekkoURLs'] (mutates `settings` in place).
    # Returns False when no instance responds, True otherwise.
    # LOCATE & VALIDATE RUNNING GEKKO INSTANCES FROM CONFIG URLs;
    possibleInstances = settings['Global']['GekkoURLs']
    validatedInstances = []
    for instance in possibleInstances:
        Response = API.checkInstance(instance)
        if Response:
            validatedInstances.append(instance)
            print("found gekko @ %s" % instance)
        else:
            print("unable to locate %s" % instance)
    if validatedInstances:
        settings['Global']['GekkoURLs'] = validatedInstances
    else:
        print("Aborted: No running gekko instances found.")
        return False
    GekkoPath = settings['Global']['gekkoPath'] + '/gekko.js'
    GekkoPath = GekkoPath.replace("$HOME", str(pathlib.Path.home()))
    # FIX THIS;
    # NOTE(review): the `False and` below deliberately disables the gekko.js
    # existence check -- confirm whether it should be re-enabled before
    # removing the dead branch.
    if False and not os.path.isfile(GekkoPath):
        print(
            "Aborted: gekko.js not found" +
            "on path specified @Settings.py;\n%s" % GekkoPath)
        return False
    return True
# DEPRECATED;
def launchGekkoChildProcess(settings):
    # Spawn the gekko web server as a child process and return its Popen
    # handle.
    # NOTE(review): this reads settings['global'] (lowercase) while the rest
    # of this module uses settings['Global'] -- likely stale; confirm the
    # key before reviving this deprecated helper.
    gekko_args = [
        'node',
        '--max-old-space-size=8192',
        settings['global']['gekkoPath'] + '/web/server.js',
    ]
    gekko_server = subprocess.Popen(gekko_args,
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
    return gekko_server
================================================
FILE: evaluation/gekko/backtest.py
================================================
#!/bin/python
from .API import httpPost
def interpreteBacktestProfitv1(backtest):
    """Score = the strategy's raw relative profit."""
    profit = backtest['relativeProfit']
    return profit
def interpreteBacktestProfitv2(backtest):
    """Score = profit relative to the market's movement (profit - market)."""
    profit = backtest['relativeProfit']
    market = backtest['market']
    return profit - market
def interpreteBacktestProfitv3(backtest):
    """Score = profit minus market, except when both are negative: then the
    raw profit is used, so a strategy is not rewarded merely for losing less
    than a falling market."""
    profit = backtest['relativeProfit']
    market = backtest['market']
    if profit < 0 and market < 0:
        return profit
    return profit - market
def getInterpreterBacktestInfo(v):
    """Describe the selected backtest-profit interpreter ('v1'..'v3')."""
    descriptions = {
        'v1': " = ",
        'v2': " = - ",
        'v3': "\nif > 0: = - \nelse = "
    }
    header = "interpreter %s: " % v
    return header + descriptions[v]
def runBacktest(
    GekkoInstanceUrl,
    TradeSetting,
    Dataset,
    candleSize=10,
    gekko_config=None,
    Debug=False,
):
    """Run a single backtest on a gekko instance.

    Returns gekko's performanceReport dict (with roundtrips attached when
    present), or a zeroed placeholder report when the instance fails or
    reports nothing -- a rare, random gekko failure.
    """
    gekko_config = createConfig(
        TradeSetting, Dataset.specifications, Dataset.daterange, candleSize,
        gekko_config, Debug
    )
    endpoint = GekkoInstanceUrl + '/api/backtest'
    fakeReport = {
        'relativeProfit': 0, 'market': 0, 'trades': 0,
        'sharpe': 0, 'roundtrips': []
    }
    try:
        result = httpPost(endpoint, gekko_config)
        performance = result['performanceReport']
        # sometime report is False(not dict)
        if type(performance) is bool:
            print("Warning: performanceReport not found, probable Gekko fail!")
            print(Dataset.specifications)
            # That fail is so rare that has no impact.. still happens randomly;
            return fakeReport # fake backtest report
    except Exception as error:
        # httpPost may return False; subscripting it lands here too
        print(error)
        return fakeReport
    if 'roundtrips' in result.keys():
        performance['roundtrips'] = result['roundtrips']
    return performance
def Evaluate(backtestconf, Datasets, phenotype, GekkoInstanceUrl):
    """Backtest a phenotype on every dataset and aggregate the reports.

    The profit interpretation ('v1'..'v3') is selected by
    backtestconf.interpreteBacktestProfit. Returns a dict holding the mean
    interpreted profit, mean sharpe, mean trade count and mean exposure
    duration across Datasets.
    """
    reports = []
    for Dataset in Datasets:
        reports.append(
            runBacktest(
                GekkoInstanceUrl,
                phenotype,
                Dataset,
                candleSize=backtestconf.candleSize,
                Debug=backtestconf.gekkoDebug,
            )
        )
    interpreters = {
        'v1': interpreteBacktestProfitv1,
        'v2': interpreteBacktestProfitv2,
        'v3': interpreteBacktestProfitv3,
    }
    interpret = interpreters[backtestconf.interpreteBacktestProfit]
    # --INTERPRETE BACKTEST RESULT;
    profits = [interpret(report) for report in reports]
    meanProfit = sum(profits) / len(profits)
    meanTrades = sum(report['trades'] for report in reports) / len(Datasets)
    meanSharpe = sum(report['sharpe'] for report in reports if report['sharpe'])
    meanSharpe = meanSharpe / len(Datasets)
    # --CALCULATE EXPOSURE DURATIONS;
    for report in reports:
        report['totalExposure'] = 0
        report['averageExposure'] = 0
        if 'roundtrips' in report.keys():
            for roundtrip in report['roundtrips']:
                report['totalExposure'] += roundtrip['duration']
            nbRoundtrips = len(report['roundtrips'])
            report['averageExposure'] = (
                report['totalExposure'] / nbRoundtrips if nbRoundtrips else 0
            )
    meanExposure = sum(report['averageExposure'] for report in reports) / len(Datasets)
    return {
        'relativeProfit': meanProfit,
        'sharpe': meanSharpe,
        'trades': meanTrades,
        'averageExposure': meanExposure
    }
def createConfig(
    TradeSetting, Database, DateRange,
    candleSize=10, gekko_config=None, Debug=False
):
    """Build the JSON config gekko expects for a backtest request.

    TradeSetting maps a single strategy name to its parameter dict; Database
    holds the market ('watch') specifications; DateRange is the backtest
    window. When a caller already provides gekko_config, it is returned
    unchanged. Debug is accepted for interface compatibility but unused here.

    Idiom fix: compare against None with `is`, not `==`.
    """
    TradeMethod = list(TradeSetting.keys())[0]
    CONFIG = {
        "watch": Database,
        "paperTrader": {
            "fee": 0.25, # declare deprecated 'fee' so keeps working w/ old gekko;
            "feeMaker": 0.15,
            "feeTaker": 0.25,
            "feeUsing": 'maker',
            "slippage": 0.05,
            "simulationBalance": {"asset": 1, "currency": 100},
            "reportRoundtrips": True,
            "enabled": True,
        },
        "tradingAdvisor": {
            "enabled": True,
            "method": TradeMethod,
            "candleSize": candleSize, # candleSize: smaller = heavier computation + better possible results;
            "historySize": 10,
        },
        TradeMethod: TradeSetting[TradeMethod],
        "backtest": {"daterange": DateRange},
        "performanceAnalyzer": {"riskFreeReturn": 2, "enabled": True},
        "valid": True,
        "data": {
            "candleProps": [
                "id", "start", "open", "high", "low", "close", "vwp", "volume", "trades"
            ],
            "indicatorResults": True,
            "report": True,
            "roundtrips": True,
            "trades": True,
        },
        "backtestResultExporter": {
            "enabled": True,
            "writeToDisk": False,
            "data": {
                "stratUpdates": False,
                "roundtrips": True,
                "stratCandles": False,
                "stratCandleProps": [
                    "open"
                ],
                "trades": False
            }
        }
    }
    if gekko_config is None:
        gekko_config = CONFIG
    return gekko_config
================================================
FILE: evaluation/gekko/dataset.py
================================================
#!/bin/python
import random
import datetime
from .API import httpPost
def getAllScanset(GekkoURL):
    """Ask a gekko instance for every candlestick dataset it knows about."""
    response = httpPost(GekkoURL + '/api/scansets')
    return response['datasets']
def selectCandlestickData(GekkoURL,
                          exchange_source=None,
                          avoidCurrency=None,
                          minDays=None):
    """Choose the candlestick dataset with the longest date range.

    GekkoURL: gekko instance queried for its scansets.
    exchange_source: optional dict matched exactly on its
        'exchange'/'currency'/'asset' keys; a truthy 'autoselect' key
        disables the filter entirely.
    avoidCurrency: asset to skip when no exchange_source filter is active.
    minDays: minimum span (days) a dataset's longest range must cover.

    Returns (specifications, range) for the best dataset, or None when no
    dataset satisfies minDays. Raises RuntimeError when no dataset matches
    the filters at all.

    Bug fixes vs the original: guard against exchange_source=None before
    reading its keys, and filter the scanset list and the span list together
    when applying minDays -- previously only the span list was filtered, so
    the winning index could point at the wrong exchange.
    """
    DataSetPack = getAllScanset(GekkoURL)
    specKeys = ['exchange', 'currency', 'asset']
    scanset = []
    # IF EXCHANGE SPECIFICATIONS ARE TO BE IGNORED;
    if exchange_source and exchange_source.get('autoselect'):
        exchange_source = None
    # SEARCH CANDIDATE DATASETS AMONG THOSE OBTAINED FROM GEKKO API;
    for s in DataSetPack:
        Valid = True
        for k in specKeys:
            if exchange_source and s[k] != exchange_source[k]:
                Valid = False
        if avoidCurrency and not exchange_source:
            if s["asset"] == avoidCurrency:
                Valid = False
        if Valid:
            scanset.append(s)
    # IN CASE NO CANDLESTICK DATASET IS COMPATIBLE;
    if len(scanset) == 0:
        if exchange_source:
            raise RuntimeError(
                "scanset not available: %s\n\tscanset found: %s" %
                (exchange_source, DataSetPack)
            )
        else:
            raise RuntimeError("no scanset available! check Gekko candle database.")
    # SEARCH ON ALL FOUND SCANSETS: record each one's longest range;
    for EXCHANGE in scanset:
        ranges = EXCHANGE['ranges']
        # no ranges found?
        if not ranges:
            continue
        range_spans = [x['to'] - x['from'] for x in ranges]
        LONGEST = range_spans.index(max(range_spans))
        EXCHANGE['max_span'] = range_spans[LONGEST]
        EXCHANGE['max_span_index'] = LONGEST
    # COMPILE MOST INTERESTING SCANSETS;
    availableScanset = [exchange for exchange in scanset
                        if 'max_span' in exchange.keys()]
    if minDays is not None:
        # filter datasets (not just spans) so the index chosen below stays
        # aligned with availableScanset;
        # NOTE(review): spans are compared in seconds here -- confirm the
        # unit gekko uses for range timestamps.
        availableScanset = [
            exchange for exchange in availableScanset
            if exchange['max_span'] > minDays * 24 * 3600
        ]
    exchange_longest_spans = [x['max_span'] for x in availableScanset]
    # Without scansets we cannot continue.
    if not exchange_longest_spans:
        print("FATAL: No scanset available.")
        return None
    best_exchange = exchange_longest_spans.index(max(exchange_longest_spans))
    best_exchange_span = availableScanset[best_exchange]['max_span_index']
    chosenScansetRange = availableScanset[best_exchange]['ranges'][best_exchange_span]
    chosenScansetSpecifications = {
        K: availableScanset[best_exchange][K]
        for K in availableScanset[best_exchange]
        if K in specKeys
    }
    return chosenScansetSpecifications, chosenScansetRange
def getCandles(globalconf, DateRange, Dataset, size=100):
    """Fetch candles for a dataset and date range from a randomly chosen
    gekko instance (picked from globalconf.GekkoURLs)."""
    instance = random.choice(globalconf.GekkoURLs)
    endpoint = instance + "/api/getCandles"
    payload = {
        "watch": Dataset.specifications,
        "daterange": DateRange,
        "adapter": "sqlite",
        "sqlite": {
            "path": "plugins/sqlite",
            "dataDirectory": "history",
            "version": 0.1,
            "dependencies": [{"module": "sqlite3", "version": "3.1.4"}],
        },
        "candleSize": size,
    }
    return httpPost(endpoint, payload)
def getDateRange(Limits, deltaDays=3):
    """Build a from/to date-range dict covering the last `deltaDays` days of
    the range `Limits` (epoch-second timestamps), rendered as
    'YYYY-mm-dd HH:MM:SS' strings by epochToString.

    Cleanup: removed the unused DateFormat local; epochToString owns the
    output format.
    """
    deltaSeconds = deltaDays * 24 * 60 * 60
    return {
        "from": "%s" % epochToString(Limits['to'] - deltaSeconds),
        "to": "%s" % epochToString(Limits['to']),
    }
def getRandomDateRange(Limits, deltaDays):
    """Pick a random `deltaDays`-long window inside the range `Limits`
    (epoch-second timestamps). With a falsy deltaDays the whole range is
    returned. Exits the process when the requested window is longer than the
    available range."""
    rangeStart = Limits['from']
    rangeEnd = Limits['to']
    windowSeconds = deltaDays * 24 * 60 * 60
    if windowSeconds > (rangeEnd - rangeStart):
        print(
            "Fatal: deltaDays on Settings.py set to a value bigger than current dataset.\n Edit Settings file to fit your chosen candlestick data."
        )
        exit(1)
    if deltaDays:
        windowStart = random.randint(rangeStart, rangeEnd - windowSeconds)
        windowEnd = windowStart + windowSeconds
    else:
        windowStart = rangeStart
        windowEnd = rangeEnd
    return {
        "from": "%s" % epochToString(windowStart),
        "to": "%s" % epochToString(windowEnd)
    }
def epochToString(D):
    """Format an epoch timestamp (seconds, interpreted as UTC) as
    'YYYY-mm-dd HH:MM:SS'."""
    stamp = datetime.datetime.utcfromtimestamp(D)
    return stamp.strftime("%Y-%m-%d %H:%M:%S")
================================================
FILE: evaluation/gekko/datasetOperations.py
================================================
#!/bin/python
import evaluation
import random
class CandlestickDataset():
    """A candlestick dataset: market specifications plus a date range.

    specifications: dict with at least 'asset', 'currency' and 'exchange'.
    daterange: dict with 'from' and 'to' keys (epoch timestamps).
    """

    def __init__(self, specifications, daterange):
        self.daterange = daterange
        self.specifications = specifications

    def restrain(self, deltaDays):
        """Clip the range in place so it spans at most `deltaDays` days,
        keeping the 'to' end fixed. A falsy deltaDays is a no-op."""
        if not deltaDays:
            return
        deltams = deltaDays * 24 * 60 * 60
        restrainedInit = self.daterange['to'] - deltams
        self.daterange['from'] = max(self.daterange['from'], restrainedInit)

    def textDaterange(self):
        """Human-readable rendering of the date range."""
        return dateRangeToText(self.daterange)

    def textSpecifications(self):
        """Render the market as 'ASSET/CURRENCY @exchange'."""
        message = "%s/%s @%s" % (self.specifications["asset"],
                                 self.specifications["currency"],
                                 self.specifications["exchange"])
        return message

    def __str__(self):
        # bug fix: the original called the nonexistent self.textSpecification()
        # (missing trailing 's') and raised AttributeError on every str().
        return self.textSpecifications()
def getRandomSectorOfDataset(sourceDataset, deltaDays):
    """Derive a new dataset covering a random `deltaDays`-long slice of the
    source dataset's date range, keeping the same market specifications."""
    pickRange = evaluation.gekko.dataset.getRandomDateRange
    slicedRange = pickRange(sourceDataset.daterange, deltaDays)
    return CandlestickDataset(sourceDataset.specifications, slicedRange)
def getLocaleDataset(World, Type='evolution'):
    """Assemble the datasets a locale will backtest on: one random sector per
    parallel candlestick dataset configured for the world.

    Type selects which environment dataset pool to draw from ('evolution' by
    default). Bug fix: the original accepted Type but ignored it, always
    reading the 'evolution' pool.
    """
    localeDataset = []
    for DS in range(World.conf.backtest.ParallelCandlestickDataset):
        sourceDataset = random.choice(World.EnvironmentParameters[Type])
        newDataset = getRandomSectorOfDataset(sourceDataset,
                                              World.conf.backtest.deltaDays)
        localeDataset.append(newDataset)
    return localeDataset
def dateRangeToText(dateRange):
    """Render a {'from': .., 'to': ..} range as 'FROM to TO'; integer epoch
    values are converted to date strings, anything else passes through."""
    def render(value):
        if type(value) == int:
            return evaluation.gekko.dataset.epochToString(value)
        return value
    start = render(dateRange['from'])
    end = render(dateRange['to'])
    return "%s to %s" % (start, end)
================================================
FILE: evaluation/gekko/statistics.py
================================================
#!/bin/python
from deap import tools
import numpy as np
# Maps per-epoch statistic keys to the human-readable labels used in logs
# and the web interface.
epochStatisticsNames = {
    'avg': 'Average profit',
    'std': 'Profit variation',
    'min': 'Minimum profit',
    'max': 'Maximum profit',
    'size': 'Population size',
    'maxsize': 'Max population size',
    'avgTrades': 'Avg trade number',
    'sharpe': 'Avg sharpe ratio',
    'avgExposure': "Avg exposure time",
    'nbElderDies': 'Elder dies count'
}
# Labels for statistics gathered only at periodic evaluation rounds.
periodicStatisticsNames = {
    'evaluationScore': "Evaluation Score",
    'evaluationScoreOnSecondary': "Score on Secondary Dataset"
}
def compileStats(locale):
    """Compile one epoch's statistics for `locale`, append them to its
    EvolutionStatistics history and write them through the World logger.

    Consumes (and resets to None) the one-shot `lastEvaluation` /
    `lastEvaluationOnSecondary` scores set elsewhere.
    """
    # --get proper evolution statistics;
    Stats = locale.stats.compile(locale.population)
    # Dataset date ranges are only recorded on the first epoch (EPOCH == 0);
    # afterwards the field is stored as None.
    Stats['dateRange'] = ' '.join([DR.textDaterange()
                                   for DR in locale.Dataset])\
        if not locale.EPOCH else None
    Stats['maxsize'] = locale.POP_SIZE
    Stats['size'] = len(locale.population)
    Stats['avgTrades'] = locale.extraStats['avgTrades']
    Stats['avgExposure'] = locale.extraStats['avgExposure']
    #Stats['nbElderDies'] = locale.extraStats['nbElderDies']
    # Second fitness component is averaged as the sharpe ratio — assumes
    # fitness.values[1] holds sharpe; TODO confirm against the evaluator.
    Stats['sharpe'] = np.mean([x.fitness.values[1] for x in locale.population])
    Stats['evaluationScoreOnSecondary'] = locale.lastEvaluationOnSecondary
    Stats['evaluationScore'] = locale.lastEvaluation
    # Reset the one-shot scores so they are not reported twice.
    locale.lastEvaluationOnSecondary = None
    locale.lastEvaluation = None
    Stats['id'] = locale.EPOCH
    locale.EvolutionStatistics.append(Stats)
    locale.World.logger.write_evolution_logs(
        locale.EPOCH, locale.EvolutionStatistics, locale.name
    )
def showStatistics(locale):
    """Print a two-column console summary of the statistics compiled for
    the locale's current epoch (see `compileStats`)."""
    # show information;
    Stats = locale.EvolutionStatistics[locale.EPOCH]
    print("EPOCH %i\t&%i" % (locale.EPOCH, locale.extraStats['nb_evaluated']))
    statnames = ['max', 'avg', 'min',
                 'std', 'size', 'maxsize',
                 'avgTrades', 'sharpe', 'avgExposure',
                 # 'nbElderDies'
                 ]
    statisticsText = []
    # Idiom fix: iterate the names directly instead of range(len(...)).
    for statName in statnames:
        value = Stats[statName]
        entry = "%s" % epochStatisticsNames[statName]
        # Integer-valued statistics are printed without decimals.
        if not value % 1:
            entry += " %i" % value
        else:
            entry += " %.3f" % value
        statisticsText.append(entry)
    # Pad every entry to the widest label and print two per row.
    columnWidth = max(len(entry) for entry in statisticsText) + 3
    for j in range(0, len(statisticsText), 2):
        print(''.join(word.ljust(columnWidth) for word in statisticsText[j:j+2]))
    print()
================================================
FILE: exchangerun.csv
================================================
EXCHANGE,CURRENCY,ASSET,STRATEGY
binance,usdt,btc,RSI_BULL_BEAR_ADX
binance,usdt,bcc,RSI_BULL_BEAR_ADX
binance,usdt,ltc,RSI_BULL_BEAR_ADX
binance,usdt,neo,RSI_BULL_BEAR_ADX
binance,usdt,qtum,RSI_BULL_BEAR_ADX
binance,usdt,bnb,RSI_BULL_BEAR_ADX
binance,usdt,eth,RSI_BULL_BEAR_ADX
================================================
FILE: gekko_evolution.yml
================================================
- name: prepare machine software and install Gekko Trading Bot
hosts: all
remote_user: ec2-user
become_method: sudo
tasks:
- name: update cache
become: yes
command: "yum update -y"
- name: Install environment components
become: yes
shell: yum install -y {{item}}
with_items:
- git
- tmux
- name: get NODEjs
become: yes
#shell: "curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash -" UBUNTU LINUX;
shell: "curl -sL https://rpm.nodesource.com/setup_8.x | bash -"
- name: install NODEjs
become: yes
command: "yum install -y nodejs"
- name: clone gekko git repo
command: "git clone https://github.com/askmike/gekko"
ignore_errors: yes
- name: install gekko nodejs libs
command: "npm install --only=production"
args:
chdir: "gekko"
- name: install problematic nodejs lib
become: yes
command: "npm install -g sqlite3"
- name: edit gekko UI config
shell: sed -i 's/{{ item }}/0.0.0.0/' gekko/web/vue/UIconfig.js
with_items:
- 127.0.0.1
- name: edit gekko UI config pt2
command: "sed -i 's/localhost/{{ inventory_hostname }}/' gekko/web/vue/UIconfig.js"
- copy:
src: ~/gekko/history
dest: ~/gekko
- name: run gekko UI
command: "tmux new -d -s Gekko 'node gekko.js --ui; detach \\;'"
args:
chdir: "gekko"
================================================
FILE: japonicus/Settings.py
================================================
#!/bin/python
import js2py
from pathlib import Path
from .configStrategies import cS
from .configIndicators import cI
import os
import pytoml
class makeSettings(dict):
    """Dict subclass whose keys are also reachable as attributes.

    Nested plain dicts are wrapped recursively — in place, mutating the
    dict passed in — so attribute access works at any depth
    (e.g. conf.backtest.deltaDays).
    """

    def __init__(self, entries):
        for key, value in list(entries.items()):
            if type(value) is dict:
                entries[key] = makeSettings(value)
        # Expose entries both as attributes and as mapping items.
        self.__dict__.update(entries)
        self.update(entries)
def getSettings(SettingsFiles=[], specific=None):
    """Load japonicus settings.

    With a non-empty `SettingsFiles` list, returns a dict mapping each
    division name to its TOML contents. Otherwise builds the full default
    settings tree; `specific` then selects behavior: falsy-but-not-None
    wraps the whole tree in makeSettings, a truthy value returns only that
    wrapped subtree, and None returns the raw dict.

    NOTE(review): control flow reconstructed — in the flat extract the
    early `return settings` made the default tree unreachable, yet callers
    rely on `getSettings()['indicators']`; guarded on SettingsFiles.
    TODO confirm against upstream.
    """
    if SettingsFiles:
        settings = {}
        for SettingsFile in SettingsFiles:
            settings[SettingsFile] = loadTomlSettings(SettingsFile)
        return settings
    s = {
        # gekko global settings;
        'global': loadTomlSettings('global'),
        # gekko backtest settings;
        'backtest': loadTomlSettings('backtest'),
        # evaluation break settings;
        'evalbreak': loadTomlSettings('evalbreak'),
        # genetic algorithm settings;
        'generations': loadTomlSettings('generations'),
        'dataset': loadTomlSettings('dataset'),
        'strategies': cS,
        'indicators': cI,
        'skeletons': {
            'ontrend': {
                "SMA_long": 1000,
                "SMA_short": 50
            }
        }
    }
    if specific is not None:
        if not specific:
            return makeSettings(s)
        return makeSettings(s[specific])
    return s
def loadTomlSettings(settingsDivisionName):
    """Load one TOML settings division from ./settings.

    Tries the user file '<name>.toml' first, then the shipped default
    '_<name>.toml'. Aborts the program (SystemExit via exit()) when
    neither file exists.
    """
    userSettingsAndDefaultSettings = [
        '%s.toml' % settingsDivisionName,
        '_%s.toml' % settingsDivisionName
    ]
    for targetFile in userSettingsAndDefaultSettings:
        filePath = os.path.join('settings', targetFile)
        if os.path.isfile(filePath):
            # Fix: close the file handle (the original left it open).
            with open(filePath) as settingsFile:
                return pytoml.load(settingsFile)
    exit("Failed to load settings! %s" % settingsDivisionName)
def get_configjs(filename="example-config.js"):
    """Evaluate a Gekko javascript config file and return it as a dict.

    Strips the node-style `module.exports` assignment so the script
    evaluates to the config object itself, then runs it through js2py.
    """
    with open(filename, "r") as handle:
        source = handle.read()
    source = source.replace("module.exports = config;", "config;")
    return js2py.eval_js(source).to_dict()
================================================
FILE: japonicus/__init__.py
================================================
#!/bin/python
from .japonicus import *
from . import options
from . import interface
================================================
FILE: japonicus/configIndicators.py
================================================
#!/bin/python
# Indicator parameter space for skeleton/indicator evolution mode.
# Each indicator maps to its tunable settings: plain scalars are fixed
# values, (low, high) tuples are ranges the GA samples from. 'active'
# marks the indicator as available for evolution (later replaced by a
# (0, 1) gene in japonicus.py).
cI = {
    "ADX": {"active": True, "period": 14, "thresholds.up": 70, "thresholds.down": 50},
    "ATR": {"active": True, "period": 14, "thresholds.up": 70, "thresholds.down": 50},
    "PPO": {
        "active": True,
        "short": (6, 18),  # short EMA
        "long": (13, 39),  # long EMA
        "signal": (1, 18),  # 100 * (shortEMA - longEMA / longEMA)
        "thresholds.down": (-0.5, 0.),  # trend thresholds
        "thresholds.up": (0., 0.5),  # trend thresholds
    },
    "TSI": {
        "active": True,
        "thresholds.up": (15, 35),
        "thresholds.down": (-35, -15),
        "short": (3, 12),
        "long": (15, 35),
    },
    "LRC": {
        "active": True,
        "thresholds.up": (15, 35),
        "thresholds.down": (-35, -15),
        "depth": (3, 18),
    },
    "RSI": {
        "active": True,
        "interval": (7, 21),  # weight
        "thresholds.down": (15, 45),  # trend thresholds
        "thresholds.up": (45, 140),  # trend thresholds
    },
    "SMMA": {
        "active": True,
        "weight": (7, 16),
        "thresholds.up": (0, 0.1),
        "thresholds.down": (-0.1, 0),
    },
    "DEMA": {
        "active": True,
        "short": (7, 15),
        "long": (12, 35),
        "thresholds.up": (0, 0.1),
        "thresholds.down": (-0.1, 0),
    },
    "CCI": {
        "active": True,
        "consistant": (7, 21),  # constant multiplier. 0.015 gets to around 70% fit
        "history": (45, 135),  # history size, make same or smaller than history
        "thresholds.down": (-150, -50),  # trend thresholds
        "thresholds.up": (50, 150),  # trend thresholds
        "thresholds.persistence": (4, 10),
    },
}
================================================
FILE: japonicus/configStrategies.py
================================================
#!/bin/python
def NEG(v):
    """Mirror a (low, high) bound tuple into negative space: (a, b) -> (-b, -a).

    Written with `def` instead of a lambda assignment (PEP 8 E731) so the
    helper carries a proper name in tracebacks; callers are unaffected.
    """
    return (-v[1], -v[0])
# Strategy parameter space used by the GA when no per-strategy TOML rules
# are found. Keys are Gekko strategy names; each value maps a strategy
# setting either to a fixed base value or to a (low, high) tuple the GA
# samples from. Dotted keys (e.g. "thresholds.up") are flattened/nested
# by promoterz.parameterOperations.
cS = {
    # Define values for strat settings for strategies to be used
    # on japonicus;
    # Each value can be a tuple of limits or just a base value.
    "ontrend" : {
        "bull_momentum_high": 80,
        "bull_momentum_low": 60,
        "bear_momentum_high": 50,
        # NOTE(review): "momentuum" spelling is kept — it is a runtime key
        # and presumably must match the strategy file; confirm upstream.
        "bear_momentuum_low": 20,
        "sec_high": 70,
        "sec_low": 50
    },
    "rsi_bbands": {
        "NbDevUp": 2,
        "NbDevDn": 2,
        "TimePeriod": 9,
        "rsi_high": 60,
        "rsi_low": 20,
        "min_hold": 5,
        "swing_trade": 0.5
    },
    "RSI_BULL_BEAR" : {
        # SMA Trends
        "SMA_long": 1000,
        "SMA_short": 50,
        # BULL
        "BULL_RSI": 10,
        "BULL_RSI_high": 80,
        "BULL_RSI_low" : 60,
        # BEAR
        "BEAR_RSI": 15,
        "BEAR_RSI_high": 50,
        "BEAR_RSI_low" : 20
    },
    "RSI_BULL_BEAR_ADXold" : {
        # SMA Trends
        "SMA_long": 1000,
        "SMA_short": 50,
        # BULL
        "BULL_RSI": 10,
        "BULL_RSI_high": 80,
        "BULL_RSI_low" : 60,
        # BEAR
        "BEAR_RSI": 15,
        "BEAR_RSI_high": 50,
        "BEAR_RSI_low" : 20,
        # ADX
        "ADX": 3,
        "ADX_high": 70,
        "ADX_low": 50
    },
    "RSI_BULL_BEAR_ADX" : {
        # SMA Trends
        "SMA_long": 1000,
        "SMA_short": 50,
        # BULL
        "BULL_RSI": 10,
        "BULL_RSI_high": 80,
        "BULL_RSI_low" : 60,
        # BEAR
        "BEAR_RSI": 15,
        "BEAR_RSI_high": 50,
        "BEAR_RSI_low" : 20,
        # ADX
        "ADX": 3,
        "ADX_high": 70,
        "ADX_low": 50,
        "BULL_MOD_high": 5,
        "BULL_MOD_low": -5,
        "BEAR_MOD_high": 15,
        "BEAR_MOD_low": -5
    },
    "Bestone" :{
        "customMACDSettings": {
            "optInFastPeriod": (3,10),
            "optInSlowPeriod": (20,50),
            "optInSignalPeriod": (5,15)
        },
        "customEMAshortSettings": {
            "optInTimePeriod": (5,15)
        },
        "customEMAlongSettings": {
            "optInTimePeriod": (15,26)
        },
        "customSTOCHSettings": {
            "optInFastKPeriod": (6, 14),
            "optInSlowKPeriod": (2,5),
            "optInSlowKMAType": (1,1),
            "optInSlowDPeriod": (2,5),
            "optInSlowDMAType": (1,1)
        },
        "customRSISettings": {
            "optInTimePeriod": (7,20)
        }
    },
    "PPOTSI":{
        "PPO.short": (3,16),
        "PPO.long": (12,35),
        "PPO.signal":(3,21),
        "PPO.up": (0., 1),
        "PPO.down": (-1, 0.),
        "TSI.up": (10,40),
        "TSI.down": (-40,-10),
        "TSI.short": (3,18),
        "TSI.long": (10,42),
        "persistence": (1,10)
    },
    "Supertrend": {
        "atrEma":(1,10),
        "bandFactor": (1,10)
    },
    "PPOLRC":{
        "PPO.short": (3,12),
        "PPO.long": (15,35),
        "PPO.signal":(3,18),
        "PPO.up": (0., 0.5),
        "PPO.down": (-0.5, 0.),
        "LRC.up": (15,35),
        "LRC.down": (-35,-15),
        "LRC.depth": (3,18),
        "persistence": (1,5)
    },
    "buyatsellat": {
        'buyat': (1.03,1.20),
        'sellat': (0.92, 0.97),
        'stop_loss_pct': (0.87, 0.95),
        'sellat_up': (1.01,1.20)
    },
    "buyatsellatPPO": {
        'buyat': (1.03,1.20),
        'sellat': (0.92, 0.97),
        'stop_loss_pct': (0.87, 0.95),
        'sellat_up': (1.01,1.20),
        "short": (6,18), # short EMA
        "long": (13,39), # long EMA
        "signal": (1,18), # 100 * (shortEMA - longEMA / longEMA)
        "thresholds.down": (-0.5,0.), # trend thresholds
        "thresholds.up": (0.,0.5), # trend thresholds
        "thresholds.persistence": (2,10), # trend duration(count up by tick) thresholds
    },
    "DEMA":{
        "short": (1,10), # short EMA
        "long": (20,50), # long EMA
        "thresholds.down": (-0.5,0.1), # trend thresholds
        "thresholds.up": (-0.1,0.5), # trend thresholds
    },
    "MACD":{
        "short": (1,10), # short EMA
        "long": (20,50), # long EMA
        "signal": (9,18), # shortEMA - longEMA diff
        "thresholds.down": (-0.5,0.), # trend thresholds
        "thresholds.up": (0.,0.5), # trend thresholds
        "thresholds.persistence": (2,10), # trend duration(count up by tick) thresholds
    },
    "PPO":{
        "short": (6,18), # short EMA
        "long": (13,39), # long EMA
        "signal": (1,18), # 100 * (shortEMA - longEMA / longEMA)
        "thresholds.down": (-0.5,0.), # trend thresholds
        "thresholds.up": (0.,0.5), # trend thresholds
        "thresholds.persistence": (2,10), # trend duration(count up by tick) thresholds
    },
    # Uses one of the momentum indicators but adjusts the thresholds when PPO is bullish or bearish
    # Uses settings from the ppo and momentum indicator config block
    "varPPO":{ # TODO: merge PPO config
        "short": (6,18), # short EMA
        "long": (13,39), # long EMA
        "signal": (1,18), # 100 * (shortEMA - longEMA / longEMA)
        "thresholds.down": (-0.5,0.), # trend thresholds
        "thresholds.up": (0.,0.5), # trend thresholds
        "thresholds.persistence": (0,4), # trend duration(count up by tick) thresholds
        "momentum": (0, 2.99999), # index of ["RSI", "TSI", "UO"]
        # new threshold is default threshold + PPOhist * PPOweight
        "weightLow": (60, 180),
        "weightHigh": (-180, -60),
    },
    "RSI":{
        "interval": (7,21), # weight
        "thresholds.low": (15,45), # trend thresholds
        "thresholds.high": (45,140), # trend thresholds
        "thresholds.persistence": (4,10), # trend duration(count up by tick) thresholds
    },
    "StochRSI":{
        "interval": (7,21), # weight
        "thresholds.low": (15,45), # trend thresholds
        "thresholds.high": (45,140), # trend thresholds
        "thresholds.persistence": (4,10), # trend duration(count up by tick) thresholds
    },
    "CCI":{
        "consistant": (7,21), # constant multiplier. 0.015 gets to around 70% fit
        "history": (45,135), # history size, make same or smaller than history
        "thresholds.down": (-150,-50), # trend thresholds
        "thresholds.up": (50,150), # trend thresholds
        "thresholds.persistence": (4,10), # trend duration(count up by tick) thresholds
    },
    "UO":{
        "first.weight": (2,8), #
        "first.period": (4.5,14), #
        "second.weight": (1,4), #
        "second.period": (7,28), #
        "third.weight": (0.5,2), #
        "third.period": (14,56), #
        "thresholds.low": (15,45), # trend thresholds
        "thresholds.high": (45,140), # trend thresholds
        "thresholds.persistence": (0,4), # trend duration(count up by tick) thresholds
    },
    "MRBB": {
        "short": (3, 12),
        "long": (12, 32),
        "signal": (6, 23),
        "interval": (7, 23),
        "crosspersistence": (7, 30),
        "macdhigh": (0.1,0.6),
        "macdlow": (-0.6,-0.1),
        "rsihigh": (30,100),
        "rsilow": (1,35),
        "bbands.TimePeriod": (16,22),
        "bbands.NbDevUp": (1,3),
        "bbands.NbDevDn": (1,3),
        "bbands.MAType": (1,3)
    }
}
================================================
FILE: japonicus/evolution_generations.py
================================================
#!/bin/python
import json
import time
import sys
import promoterz
import evaluation
from . import interface
from .Settings import getSettings, makeSettings
import stratego
from functools import partial
import evaluation.gekko.datasetOperations as datasetOperations
# Module-level placeholder; rebound inside Generations() when running in
# indicator mode (see the commented-out `global` there).
StrategyFileManager = None
# TEMPORARY ASSIGNMENT OF EVAL FUNCTIONS; SO THINGS REMAIN ¿SANE;
def indicatorEvaluate(
    StrategyFileManager,
    constructPhenotype,
    genconf,
    Datasets,
    Individual,
    gekkoUrl,
):
    """Score an indicator-skeleton individual via a gekko backtest.

    Ensures a strategy file exists for the phenotype (checkStrategy) and
    keys the phenotype by the resulting strategy name.
    """
    parameters = constructPhenotype(Individual)
    stratName = StrategyFileManager.checkStrategy(parameters)
    return evaluation.gekko.backtest.Evaluate(
        genconf, Datasets, {stratName: parameters}, gekkoUrl
    )
def standardEvaluate(constructPhenotype,
                     genconf, Datasets, Individual, gekkoUrl):
    """Score one individual by backtesting its phenotype, keyed under the
    individual's own strategy name, on the given datasets."""
    parameters = constructPhenotype(Individual)
    return evaluation.gekko.backtest.Evaluate(
        genconf, Datasets, {Individual.Strategy: parameters}, gekkoUrl
    )
def benchmarkEvaluate(constructPhenotype,
                      genconf, Datasets, Individual, gekkoUrl):
    """Score one individual with the synthetic benchmark evaluator.

    `Datasets` and `gekkoUrl` are accepted only for signature parity with
    standardEvaluate; the benchmark does not use them.
    """
    parameters = constructPhenotype(Individual)
    return evaluation.benchmark.benchmark.Evaluate(
        genconf, {Individual.Strategy: parameters}
    )
def grabDatasets(conf):
    """Load the primary (evolution) and secondary (evaluation) candlestick
    datasets described by the configuration.

    Returns (evolutionDatasets, evaluationDatasets); the latter list may
    be empty when no suitable secondary dataset is found.
    """
    # CHECK HOW MANY EVOLUTION DATASETS ARE SPECIFIED AT SETTINGS;
    evolutionDatasetNames = ['dataset_source']
    evolutionDatasets = []
    for DS in range(1, 100):
        datasetConfigName = 'dataset_source%i' % DS
        if datasetConfigName in conf.dataset.__dict__.keys():
            evolutionDatasetNames.append(datasetConfigName)
    # --GRAB PRIMARY (EVOLUTION) DATASETS
    for evolutionDatasetName in evolutionDatasetNames:
        D = evaluation.gekko.dataset.selectCandlestickData(
            conf.Global.GekkoURLs[0],
            exchange_source=conf.dataset.__dict__[evolutionDatasetName],
            minDays=conf.backtest.deltaDays
        )
        evolutionDatasets.append(datasetOperations.CandlestickDataset(*D))
        try:
            evolutionDatasets[-1].restrain(conf.dataset.dataset_span)
        except Exception:
            # Fix: the warning now names the dataset source (the original
            # printed the literal text 'evolutionDatasetName') and spells
            # 'dataset_span' without the stray space.
            print('dataset_span not configured for %s. skipping...'
                  % evolutionDatasetName)
    # --GRAB SECONDARY (EVALUATION) DATASET
    try:
        # Fix: `Avoid` was computed but never passed (avoidCurrency=None);
        # forward it so the secondary dataset avoids the primary asset —
        # TODO confirm intent against selectCandlestickData.
        Avoid = evolutionDatasets[0].specifications['asset']
        D = evaluation.gekko.dataset.selectCandlestickData(
            conf.Global.GekkoURLs[0],
            exchange_source=conf.dataset.eval_dataset_source,
            avoidCurrency=Avoid,
            minDays=conf.backtest.deltaDays
        )
        if D is not None:
            evaluationDatasets = [datasetOperations.CandlestickDataset(*D)]
            evaluationDatasets[0].restrain(conf.dataset.eval_dataset_span)
        else:
            evaluationDatasets = []
    except RuntimeError:
        evaluationDatasets = []
        print("Evaluation dataset not found.")
    return evolutionDatasets, evaluationDatasets
def Generations(
        EvaluationModule,
        japonicusOptions,
        EvaluationMode,
        settings,
        options,
        web=None):
    """Run one full genetic-algorithm session: build the toolbox, logger
    and World, then iterate epochs until NBEPOCH is reached.

    NOTE(review): indentation was reconstructed from a flattened extract;
    nesting of the benchmark/standard branches follows the comments —
    confirm against upstream.
    """
    # --LOAD SETTINGS;
    conf = makeSettings(settings)
    # --APPLY COMMAND LINE GENCONF SETTINGS;
    # Command-line values override generation settings when present.
    for parameter in conf.generation.__dict__.keys():
        if parameter in options.__dict__.keys():
            if options.__dict__[parameter] != None:
                conf.generation[parameter] = options.__dict__[parameter]
    GenerationMethod = promoterz.functions.selectRepresentationMethod(
        japonicusOptions["GenerationMethod"]
    )
    # --MANAGE Evaluation Modes;
    if EvaluationMode == 'indicator':
        # global StrategyFileManager
        # NOTE(review): conf.indicator vs the 'indicators' settings key —
        # possible attribute mismatch; confirm.
        StrategyFileManager = stratego.gekko_strategy.StrategyFileManager(
            conf.Global.gekkoPath, conf.indicator
        )
        Evaluate = partial(indicatorEvaluate, StrategyFileManager)
        Strategy = options.skeleton
    # --for standard methods;
    else:
        Strategy = EvaluationMode
        if options.benchmarkMode:
            Evaluate = benchmarkEvaluate
            evolutionDatasets, evaluationDatasets = [], []
            # NOTE(review): conf.gen here vs conf.generation elsewhere —
            # possible attribute bug; confirm.
            conf.gen.minimumProfitFilter = None
        else:
            Evaluate = standardEvaluate
            evolutionDatasets, evaluationDatasets = grabDatasets(
                conf
            )
    # -- PARSE TARGET PARAMETERS
    TargetParameters = promoterz.parameterOperations.flattenParameters(
        japonicusOptions["TargetParameters"])
    TargetParameters = promoterz.parameterOperations.parameterValuesToRangeOfValues(
        TargetParameters, conf.generation.parameter_spread
    )
    GlobalTools = GenerationMethod.getToolbox(Strategy,
                                              conf.generation,
                                              TargetParameters)
    # Remote gekko hosts are appended to the local URL pool.
    RemoteHosts = evaluation.gekko.API.loadHostsFile(conf.Global.RemoteAWS)
    conf.Global.GekkoURLs += RemoteHosts
    if RemoteHosts:
        print("Connected Remote Hosts:\n%s" % ('\n').join(RemoteHosts))
        if EvaluationMode == 'indicator':
            exit('Indicator mode is yet not compatible with multiple hosts.')
    # --INITIALIZE LOGGER;
    todayDate = time.strftime("%Y_%m_%d-%H.%M.%S", time.gmtime())
    if evolutionDatasets:
        ds_specs = evolutionDatasets[0].specifications
        logfilename = "%s-%s-%s-%s-%s" % (
            Strategy,
            ds_specs['exchange'],
            ds_specs['currency'],
            ds_specs['asset'],
            todayDate
        )
    else:
        logfilename = "benchmark%s" % todayDate
    Logger = promoterz.logger.Logger(logfilename)
    # --PRINT RUNTIME ARGS TO LOG HEADER;
    ARGS = ' '.join(sys.argv)
    Logger.log(ARGS, target='Header')
    # --SHOW PARAMETER INFO;
    if Strategy:
        Logger.log("Evolving %s strategy;\n" % Strategy)
    Logger.log("evaluated parameters ranges:", target="Header")
    for k in TargetParameters.keys():
        Logger.log(
            "%s%s%s\n" % (k, " " * (30 - len(k)), TargetParameters[k]),
            target="Header"
        )
    # --LOG CONFIG INFO;
    configInfo = json.dumps(conf.generation.__dict__, indent=4)
    Logger.log(configInfo, target="Header", show=False)
    # --SHOW DATASET INFO;
    EvaluationModule.showPrimaryInfo(Logger,
                                     evolutionDatasets,
                                     evaluationDatasets)
    # --INITIALIZE WORLD WITH CANDLESTICK DATASET INFO; HERE THE GA KICKS IN;
    GlobalTools.register('Evaluate', Evaluate,
                         GlobalTools.constructPhenotype, conf.backtest)
    GlobalTools.register("ApplyResult", EvaluationModule.ResultToIndividue)
    GlobalTools.register("showIndividue", EvaluationModule.showIndividue)
    # --THIS LOADS A DATERANGE FOR A LOCALE;
    if options.benchmarkMode:
        # Benchmark mode uses a single dummy dataset with an empty range.
        def onInitLocale(World):
            Dataset = [
                datasetOperations.CandlestickDataset(
                    {},
                    {
                        'from': 0,
                        'to': 0
                    }
                )]
            return Dataset
    else:
        def onInitLocale(World):
            Dataset = datasetOperations.getLocaleDataset(World)
            return Dataset
    # Select run loops;
    populationLoops = [promoterz.sequence.locale.standard_loop.execute]
    worldLoops = [promoterz.sequence.world.parallel_world.execute]
    # Initalize World;
    World = promoterz.world.World(
        GlobalTools=GlobalTools,
        populationLoops=populationLoops,
        worldLoops=worldLoops,
        conf=conf,
        TargetParameters=TargetParameters,
        EnvironmentParameters={
            'evolution': evolutionDatasets,
            'evaluation': evaluationDatasets
        },
        onInitLocale=onInitLocale,
        web=web,
    )
    World.logger = Logger
    World.EvaluationStatistics = []
    World.EvaluationModule = EvaluationModule
    World.seedEnvironment()
    World.logger.updateFile()
    # INITALIZE EVALUATION PROCESSING POOL
    World.parallel = World.EvaluationModule.EvaluationPool(
        World,
        conf.Global.GekkoURLs,
        conf.backtest.ParallelBacktests,
        conf.generation.showIndividualEvaluationInfo,
    )
    # --GENERATE INITIAL LOCALES;
    for l in range(conf.generation.NBLOCALE):
        World.generateLocale()
    # --RUN EPOCHES;
    while World.EPOCH < World.conf.generation.NBEPOCH:
        World.runEpoch()
        # Periodic evaluation break (every N epochs) outside benchmark mode.
        if conf.evalbreak.evaluateSettingsPeriodically and not options.benchmarkMode:
            if not World.EPOCH % conf.evalbreak.evaluateSettingsPeriodically:
                promoterz.evaluationBreak.showResults(World)
        if not World.EPOCH % 10:
            print("Total Evaluations: %i" % World.totalEvaluations)
    # RUN ENDS. SELECT INDIVIDUE, LOG AND PRINT STUFF;
    # FinalBestScores.append(Stats['max'])
    print(World.EnvironmentParameters)
    # After running EPOCHs, select best candidates;
    if not options.benchmarkMode:
        promoterz.evaluationBreak.showResults(World)
    print("")
    print("\t\t.RUN ENDS.")
================================================
FILE: japonicus/halt.py
================================================
#!/bin/python
import signal
import sys
import psutil
import os
import time
M = sys.version_info.major
m = sys.version_info.minor
# Require Python >= 3.6. Fix: the original test
# (`not M >= 3 or not m >= 6`) wrongly rejects any future major with a
# minor below 6 (e.g. 4.0); compare the (major, minor) tuple instead.
if (M, m) < (3, 6):
    message = 'check your python version before running japonicus.'
    message += ' Python>=3.6 is required. Python==%i.%i detected.' % (M, m)
    print(message)
    exit(1)
# Guard so the SIGINT cleanup only runs once even if the user mashes Ctrl-C.
Aware = False
def userExit(x, y):
    """SIGINT handler: kill every child process of this one, wait briefly,
    then exit. `x`/`y` are the (signum, frame) signal-handler arguments."""
    parent = psutil.Process(os.getpid())
    global Aware
    if not Aware:
        print("\n\nAborted by user. (SIGINT)\n\n")
        Aware = True
        try:
            # Terminate the whole worker tree before exiting ourselves.
            for child in parent.children(recursive=True):
                child.kill()
            time.sleep(2)
            exit(0)
        except (SystemExit):
            raise
signal.signal(signal.SIGINT, userExit)
================================================
FILE: japonicus/interface.py
================================================
#!/bin/python
import evaluation
def showTitleDisclaimer(backtestsettings, VERSION):
    """Print the japonicus banner, version, and a disclaimer naming the
    configured backtest-profit interpreter function."""
    TITLE = """
     ██╗ █████╗ ██████╗  ██████╗ ███╗   ██╗██╗ ██████╗██╗   ██╗███████╗
     ██║██╔══██╗██╔══██╗██╔═══██╗████╗  ██║██║██╔════╝██║   ██║██╔════╝
     ██║███████║██████╔╝██║   ██║██╔██╗ ██║██║██║     ██║   ██║███████╗
██   ██║██╔══██║██╔═══╝ ██║   ██║██║╚██╗██║██║██║     ██║   ██║╚════██║
╚█████╔╝██║  ██║██║     ╚██████╔╝██║ ╚████║██║╚██████╗╚██████╔╝███████║
 ╚════╝ ╚═╝  ╚═╝╚═╝      ╚═════╝ ╚═╝  ╚═══╝╚═╝ ╚═════╝ ╚═════╝ ╚══════╝
"""
    TITLE += "\t EVOLUTIONARY GENETIC ALGORITHMS"
    try:
        print(TITLE, end="")
    # Fix: `except A or B` evaluates `A or B` to A alone, so only
    # UnicodeEncodeError was caught; a tuple catches both classes.
    except (UnicodeEncodeError, SyntaxError):
        print("\nJAPONICUS\n")
    print('\t' * 4 + 'v%.2f' % VERSION)
    print()
    profitDisclaimer = "The profits reported here depends on backtest interpreter function;"
    interpreterFuncName = backtestsettings['interpreteBacktestProfit']
    interpreterInfo = evaluation.gekko.backtest.getInterpreterBacktestInfo(
        interpreterFuncName)
    print("%s \n\t%s\n" % (profitDisclaimer, interpreterInfo))
================================================
FILE: japonicus/japonicus.py
================================================
#!/bin/python
from . import halt, Settings, interface
from time import sleep
import random
from threading import Thread
from .evolution_generations import Generations
import datetime
import os
import waitress
import promoterz
from version import VERSION
def launchWebEvolutionaryInfo():
    """Start the evolutionary-statistics web server on 0.0.0.0:8182 in a
    background thread and return the web application object."""
    print("WEBSERVER MODE")
    pageTitle = "japonicus evolutionary statistics - v%.2f" % VERSION
    webApp, webServer = promoterz.webServer.core.build_server(pageTitle)
    serverThread = Thread(
        target=waitress.serve,
        kwargs={"app": webServer, "listen": "0.0.0.0:8182"}
    )
    serverThread.start()
    return webApp
def buildSettingsOptions(optionparser, settingSubsets):
    """Load the given settings subsets, expose each subset's keys as
    command-line options, parse the command line, and fold the parsed
    values back into the settings. Returns (settings, options)."""
    settings = Settings.getSettings(SettingsFiles=settingSubsets)
    # PARSE GENCONF & DATASET COMMANDLINE ARGUMENTS;
    # The parser accumulates one option group per subset; the last bound
    # parser (assumes settingSubsets is non-empty) parses the argv.
    for subset in settingSubsets:
        parser = promoterz.metaPromoterz.generateCommandLineArguments(
            optionparser,
            settings[subset])
    options, args = parser.parse_args()
    for subset in settingSubsets:
        updated = promoterz.metaPromoterz.applyCommandLineOptionsToSettings(
            options,
            settings[subset]
        )
        settings[subset] = updated
    return settings, options
def loadEvaluationModule():
    # Unfinished stub: presumably meant to verify that an evaluation
    # module exposes the interface names listed below; currently does
    # nothing and returns None.
    req = [
        "validateSettings",
        "showStatistics"
    ]
    pass
class JaponicusSession():
    """Top-level run driver: shows the banner, selects a strategy, builds
    the GA options and launches `Generations` (options.repeater times).

    NOTE(review): indentation reconstructed from a flattened extract —
    confirm branch nesting against upstream.
    """

    def __init__(self, EvaluationModule, settings, options):
        # ADDITIONAL MODES;
        markzero_time = datetime.datetime.now()
        print()
        # show title;
        interface.showTitleDisclaimer(settings['backtest'], VERSION)
        self.web_server = launchWebEvolutionaryInfo()\
            if options.spawn_web else None
        sleep(1)
        if not EvaluationModule.validateSettings(settings):
            exit(1)
        # --SELECT STRATEGY;
        if options.random_strategy:
            # Keep drawing strategy names until one has a matching .js file
            # in the gekko strategies folder. NOTE(review): loops forever if
            # no configured strategy has a file.
            Strategy = ""
            GekkoStrategyFolder = os.listdir(settings['Global']['gekkoPath'] + '/strategies')
            while Strategy + '.js' not in GekkoStrategyFolder:
                if Strategy:
                    print(
                        "Strategy %s descripted on settings but not found on strat folder." %
                        Strategy
                    )
                Strategy = random.choice(list(settings['strategies'].keys()))
            print("> %s" % Strategy)
        elif options.strategy:
            Strategy = options.strategy
        elif not options.skeleton:
            print("No strategy specified! Use --strat or go --help")
            exit(1)
        # --LAUNCH GENETIC ALGORITHM;
        if options.genetic_algorithm:
            japonicusOptions = {
                "GenerationMethod": None,
                "TargetParameters": None
            }
            japonicusOptions["GenerationMethod"] =\
                'chromosome' if options.chromosome_mode else 'oldschool'
            if options.skeleton:
                # Indicator mode: evolve a skeleton plus all active
                # indicators; each gains an 'active' (0, 1) gene.
                EvaluationMode = 'indicator'
                AllIndicators = Settings.getSettings()['indicators']
                TargetParameters = Settings.getSettings()['skeletons'][options.skeleton]
                for K in AllIndicators.keys():
                    if type(AllIndicators[K]) != dict:
                        TargetParameters[K] = AllIndicators[K]
                    elif AllIndicators[K]['active']:
                        TargetParameters[K] = AllIndicators[K]
                        TargetParameters[K]['active'] = (0, 1)
                japonicusOptions["TargetParameters"] = TargetParameters
                if not TargetParameters:
                    print("Bad configIndicators!")
                    exit(1)
            else:
                EvaluationMode = Strategy
                # READ STRATEGY PARAMETER RANGES FROM TOML;
                # Falls back to gekko's own strategy TOML when the local
                # strategy_parameters file is missing.
                try:
                    TOMLData = promoterz.TOMLutils.preprocessTOMLFile(
                        "strategy_parameters/%s.toml" % Strategy
                    )
                except FileNotFoundError:
                    print("Failure to find strategy parameter rules for " +
                          "%s at ./strategy_parameters" % Strategy)
                    gekkoParameterPath = "%s/config/strategies/%s.toml" %\
                        (settings['Global']['gekkoPath'], Strategy)
                    print("Trying to locate strategy parameters at %s" %
                          gekkoParameterPath)
                    TOMLData = promoterz.TOMLutils.preprocessTOMLFile(
                        gekkoParameterPath)
                japonicusOptions["TargetParameters"] =\
                    promoterz.TOMLutils.TOMLToParameters(TOMLData)
            # RUN ONE EQUAL INSTANCE PER REPEATER NUMBER SETTINGS,
            # SEQUENTIALLY...
            for s in range(options.repeater):
                Generations(
                    EvaluationModule,
                    japonicusOptions,
                    EvaluationMode,
                    settings,
                    options,
                    web=self.web_server
                )
        deltatime = datetime.datetime.now() - markzero_time
        print("Run took %i seconds." % deltatime.seconds)
        if options.spawn_web:
            print('Statistics info server still runs...')
================================================
FILE: japonicus/options.py
================================================
import optparse

# Command-line interface for the japonicus entry point.
parser = optparse.OptionParser()
parser.add_option(
    '-g', '--genetic', dest='genetic_algorithm', action='store_true', default=False,
    help="Genetic Algorithm evolution mode."
)
parser.add_option(
    '-c', '--chromosome', dest='chromosome_mode', action='store_true', default=False,
    help="Alternative internal representation of parameters for Genetic Algorithm mode."
)
parser.add_option(
    '-b', '--bayesian', dest='bayesian_optimization', action='store_true', default=False,
    help='Bayesian evolution mode.'
)
parser.add_option(
    '-k', '--gekko', dest='spawn_gekko', action='store_true', default=False,
    help="Launch gekko instance."
)
parser.add_option(
    '-r', '--random', dest='random_strategy', action='store_true', default=False,
    help="Run on random strategy."
)
parser.add_option(
    '-e', '--benchmark', dest='benchmarkMode', action='store_true',
    default=False,
    help="Run GA benchmark mode. Strategy names are restricted to specific strats."
)
parser.add_option(
    '-w', '--web', dest='spawn_web', action='store_true', default=False,
    help="Launch japonicus web server showing evolutionary statistics."
)
# Fix: these option strings carried a trailing space ('--repeat ' etc.)
# and only matched through optparse's unique-prefix abbreviation matching.
parser.add_option('--repeat', dest='repeater', type=int, default=1)
parser.add_option('--strat', dest='strategy', default=None)
parser.add_option('--skeleton', dest='skeleton', default=None)
================================================
FILE: japonicus-run
================================================
#!/bin/python
# Entry point: build settings/options from the command line and start a
# japonicus session driven by the gekko evaluation module.
import os
import japonicus
import evaluation

# Run from the repository root so relative paths (settings/, logs) resolve.
os.chdir(os.path.dirname(os.path.realpath(__file__)))
settings, options = japonicus.buildSettingsOptions(
    japonicus.options.parser,
    evaluation.gekko.SettingsFiles
)
japonicus.JaponicusSession(evaluation.gekko, settings, options)
================================================
FILE: jlivetrader.py
================================================
#!/bin/python
import os
import optparse
import json
import livetrader.exchangeMonitor
import livetrader.gekkoTrigger
import livetrader.gekkoChecker
try:
import livetrader.strategyRanker
except Exception:
pass
parser = optparse.OptionParser()
parser.add_option('-b', '--balance',
dest='balanceChecker', action='store_true', default=False)
parser.add_option('-t', '--trigger ',
dest='botTrigger', type='str', default='')
parser.add_option('-c', dest='runningBotChecker',
action='store_true', default=False)
parser.add_option('-l', dest='tradingBot', action='store_true',
default=False)
parser.add_option('--candleSize ',
dest='candleSize', type='int', default=5)
parser.add_option('--strat ', dest='strategy',
type='str', default='')
parser.add_option('--param ', dest='alternativeParameters',
type='str', default=None)
parser.add_option('-k', dest='killGekkoBots', action='store_true',
default=False,
help='Destroy all running gekko bot instances.')
parser.add_option('-s', dest='viewLastTrades', action='store_true',
default=False,
help='Show last trades done by bots.')
options, args = parser.parse_args()
if __name__ == '__main__':
    # Run from the repository root so relative paths resolve.
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    exchange = livetrader.exchangeMonitor.Exchange('binance')
    # -b: report total account value in USDT.
    if options.balanceChecker:
        totalUSD = exchange.getUserBalance()
        print("net weight at %s: US$T%.2f" % (
            exchange.name,
            totalUSD)
        )
    # -t STRAT: regenerate the exchange market file and launch one bot per
    # asset/currency pair with the named strategy.
    if options.botTrigger:
        allPairs = exchange.getAssets()
        assetCurrencyPairs = exchange.parseAssets(allPairs)
        Stratlist = [options.botTrigger]
        exchangeConfPath =\
            exchange.conf.binanceAssetCurrencyTargetFilePath
        if exchangeConfPath:
            exchangeMarketData = exchange.generateMarketsJson(
                assetCurrencyPairs)
            exchangeConfPath = os.path.join(exchangeConfPath,
                                            'binance-markets.json')
            with open(exchangeConfPath, 'w') as F:
                json.dump(exchangeMarketData, F, indent=2)
        livetrader.gekkoTrigger.launchBatchTradingBots(
            assetCurrencyPairs,
            Stratlist,
            options
        )
    # -c: print recent per-market orders and check running gekko bots.
    if options.runningBotChecker:
        ranker = livetrader.strategyRanker.strategyRanker()
        ranker.loadStrategyRankings()
        userOrderHistory = exchange.getRecentOrders()
        for M in userOrderHistory.keys():
            marketOrderHistory = userOrderHistory[M]
            if marketOrderHistory:
                information = json.dumps(marketOrderHistory, indent=2)
                print(information)
        livetrader.gekkoChecker.checkGekkoRunningBots(exchange,
                                                      ranker, options)
    # -k: stop all running gekko bots.
    if options.killGekkoBots:
        livetrader.gekkoChecker.stopGekkoBots()
    # -s: dump recent trades as JSON.
    if options.viewLastTrades:
        Orders = exchange.getRecentOrders()
        print(json.dumps(Orders, indent=2))
================================================
FILE: livetrader/exchangeMonitor.py
================================================
#!/bin/python
import ccxt
import json
from japonicus import Settings
import time
class Exchange():
    """Wrapper over a ccxt exchange client used by the live-trading tools.

    NOTE(review): the constructor always instantiates ccxt.binance
    regardless of `name` — confirm before reusing for other exchanges.
    Currency handling is hard-wired to USDT throughout.
    """

    def __init__(self, name):
        self.name = name
        # Per-exchange TOML settings (loaded via japonicus Settings).
        self.conf = Settings.makeSettings(Settings.loadTomlSettings(name))
        # Credentials file: API key on the first line, secret on the second.
        secret = open(self.conf.credentialsFilePath).read()
        secret = secret.split('\n')
        self.API = ccxt.binance({
            'apiKey': secret[0],
            'secret': secret[1]
        })
        self.API.load_markets()

    def getCotations(self):
        # Last prices for every */USDT market.
        return self.fetchAssetPrices(self.getMarketsOfCurrency())

    def parseAsset(self, Asset):
        # Returns the (free, locked) amounts of one balance entry as floats.
        P = [float(Asset[code]) for code in ['free', 'locked']]
        return P[0], P[1]

    def fetchAssetPrices(self, Symbols):
        # One ticker request per symbol; returns {symbol: lastPrice}.
        Prices = {}
        for Symbol in Symbols:
            Cotation = self.API.fetch_ticker(Symbol)
            Prices[Symbol] = float(Cotation['info']['lastPrice'])
        return Prices

    def getAveragePrices(self):
        # Arithmetic mean of the last prices across all USDT markets.
        Cotations = self.getCotations()
        AllCotations = list(Cotations.keys())
        averagePrices = sum([Cotations[S] for S in AllCotations]) / len(AllCotations)
        return averagePrices

    def getMarketsOfCurrency(self, currency='USDT'):
        # All symbols quoted in `currency` (matched as '/<currency>').
        return [S for S in self.API.symbols if '/%s' % currency in S]

    def getUserBalance(self, Verbose=False):
        """Total account value in USDT: free+locked of each held asset,
        converted through its <asset>/USDT last price. Assets without a
        USDT market are skipped."""
        Balance = self.API.fetch_balance()['info']['balances']
        totalUSD = 0
        Cotations = self.getCotations()
        for Asset in Balance:
            Free, Locked = self.parseAsset(Asset)
            if Free or Locked:
                if Verbose:
                    print(Asset)
                if Asset['asset'] == 'USDT':
                    Symbol = 'USDT'
                    totalAsset = Free + Locked
                    assetValue = totalAsset
                    if Verbose:
                        print("%.2f USDT" % totalAsset)
                else:
                    Symbol = '%s/USDT' % Asset['asset']
                    if Symbol in self.API.symbols:
                        price = Cotations[Symbol]
                        if Verbose:
                            print("%s price %.2f" % (Asset['asset'], price))
                        totalAsset = Free + Locked
                        assetValue = (totalAsset * price)
                    else:
                        # No USDT market for this asset; ignore it.
                        continue
                totalUSD += assetValue
                if Verbose:
                    print('--')
                    print(totalAsset)
                    print(assetValue)
                    print(totalUSD)
                    print()
        return totalUSD

    def getAssets(self):
        # Symbols involving USDT (substring match on the symbol string).
        Assets = [A for A in self.API.symbols if 'USDT' in A]
        return Assets

    def parseAssets(self, assets):
        # Split 'ASSET/CURRENCY' symbols into dict records for the bots.
        LIST = []
        for Asset in assets:
            N = Asset.split('/')
            A = {
                'EXCHANGE': self.name,
                'ASSET': N[0],
                'CURRENCY': N[1]
            }
            LIST.append(A)
        return LIST

    def generateMarketsJson(self, Assets):
        """Build the gekko 'markets' JSON structure from the exchange info.

        NOTE(review): the `Assets` argument is immediately overwritten by
        self.getAssets(), so whatever callers pass is ignored — confirm
        whether that is intentional before "fixing" it.
        """
        Assets = self.getAssets()
        marketData = []
        assetList = []
        exchangeAssetInfo = self.API.publicGetExchangeInfo()['symbols']
        for Asset in Assets:
            pair = Asset.split('/')
            assetList.append(pair[0])
            # gekko expects [currency, asset] ordering.
            pair.reverse()
            orderInfo = None
            for pairInfo in exchangeAssetInfo:
                if pairInfo['symbol'] == Asset.replace('/', ''):
                    # Merge all exchange filters into one flat dict.
                    allFilters = {}
                    for Filter in pairInfo['filters']:
                        del Filter['filterType']
                        allFilters.update(Filter)
                    orderInfo = {
                        "amount": allFilters['minQty'],
                        "price": allFilters['minPrice'],
                        "order": 1
                    }
                    break
            if orderInfo is None:
                print("Failed to grab data for %s" % Asset)
                continue
            pairEntry = {
                "pair": pair,
                "minimalOrder": orderInfo
            }
            marketData.append(pairEntry)
        fullMarketData = {
            "assets": assetList,
            "currencies": ["USDT"],
            "markets": marketData
        }
        return fullMarketData

    def getRecentOrders(self, pastTimeRangeDays=2):
        # User trades per market since `pastTimeRangeDays` ago
        # (timestamp converted to milliseconds for ccxt).
        userOrderHistory = {}
        for Market in self.getAssets():
            pastTimeRange = pastTimeRangeDays * 24 * 3600
            sinceTimestamp = (time.time() - pastTimeRange) * 1000
            Orders = self.API.fetch_my_trades(Market, since=sinceTimestamp)
            userOrderHistory[Market] = Orders
        return userOrderHistory

    def getPriceHistory(self):
        # OHLCV candles per market, with ccxt defaults.
        candlestickData = {}
        for Market in self.getAssets():
            candlestickData[Market] = self.API.fetch_ohlcv(Market)
        return candlestickData
================================================
FILE: livetrader/gekkoChecker.py
================================================
#!/bin/python
from . import gekkoTrigger
try:
from . import assetAllocator
except Exception:
pass
from dateutil import parser as dateparser
import datetime
import csv
import re
import random
from subprocess import Popen, PIPE
import pytoml
import os
import time
import json
def calculateMostIndicatedAssets(exchange):
    """Rank the exchange's markets by candlestick analysis and return the
    most promising ones as {EXCHANGE, ASSET, CURRENCY} descriptors.

    NOTE(review): relies on `assetAllocator`, whose import at the top of
    this file is wrapped in try/except — confirm it is available.
    """
    candlestickData = exchange.getPriceHistory()
    selectedSymbols = assetAllocator.selectMostProbableAssets(candlestickData)
    descriptors = []
    for symbol in selectedSymbols:
        asset, currency = symbol.split('/')[0], symbol.split('/')[1]
        descriptors.append({'EXCHANGE': exchange.name,
                            'ASSET': asset,
                            'CURRENCY': currency})
    return descriptors
def stopGekkoBots():
    """Kill (SIGKILL) every running gekko core process found in `ps aux`."""
    PS = ['ps', 'aux']
    runningProcs = Popen(PS,
                         stdout=PIPE, stderr=PIPE)
    runningProcs = runningProcs.stdout.read().decode('utf-8').split('\n')
    killPIDs = []
    for proc in runningProcs:
        if 'gekko/core' in proc:
            # Fix: use a raw string — "\d" in a plain string is an invalid
            # escape (DeprecationWarning on modern Python).
            # NOTE(review): this grabs the first 3+ digit run on the line,
            # which is normally the PID column; proc.split()[1] would be
            # more robust — confirm before changing.
            PID = re.findall(r"\d\d\d+", proc)[0]
            killPIDs.append(PID)
    print(killPIDs)
    for PID in killPIDs:
        N = Popen(['kill', '-9', PID], stdout=PIPE)
        N.communicate()
def interpreteRunningBotStatistics(runningBots):
    """Split running gekko instances into watcher runtimes and trader strategies.

    runningBots -- dict of instance-name -> instance payload as returned by
                   the gekko HTTP API ('config' and 'events' keys).

    Returns (runningTimes, allBotStrategies):
      runningTimes     -- seconds between first and latest candle of each
                          market watcher.
      allBotStrategies -- tradingAdvisor method name of each tradebot.
    """
    allBotStrategies = []
    runningTimes = []
    for B in runningBots.keys():
        Bot = runningBots[B]
        if Bot["config"]["type"] == 'tradebot':
            botCurrentStrategy = Bot["config"]["tradingAdvisor"]["method"]
            allBotStrategies.append(botCurrentStrategy)
        elif Bot["config"]["type"] == 'market watcher':
            fC = dateparser.parse(Bot["events"]["initial"]["candle"]["start"])
            lC = dateparser.parse(Bot["events"]["latest"]["candle"]["start"])
            # Bug fix: `.seconds` discards whole days, so watchers running
            # longer than 24h reported a wrapped-around runtime;
            # total_seconds() keeps the full span.
            delta = (lC - fC).total_seconds()
            runningTime = delta
            runningTimes.append(runningTime)
        else:
            print("Odd runningBot found:")
            print(json.dumps(Bot, indent=2))
    return runningTimes, allBotStrategies
def getParameterSettingsPath(parameterName):
    """Return the path of the TOML file holding a named parameter set."""
    basePath = os.path.join('strategy_parameters', parameterName)
    return basePath + '.toml'
def operateStrategyScores(exchange, ranker,
                          Balances, runningTimeHours,
                          currentPortfolioStatistics, runningBotStrategies):
    """Score the strategy that just finished a trading run.

    Finds the balance snapshot closest to the run's start, compares it with
    the current portfolio value, and appends the normalized score to the
    matching strategy in the ranker's scoreboard.

    Balances -- list of {'TIME', 'BALANCE', 'AVERAGE_PRICE'} rows.
    runningTimeHours -- average watcher runtime, in hours.
    """
    print("Rebooting gekko trading bots.")
    # Bug fix: the elapsed runtime is expressed in *hours*; the original
    # built timedelta(minutes=hours * 3600), overshooting by a factor of 60.
    markzeroTime = datetime.timedelta(hours=runningTimeHours)
    predictedStartTime = datetime.datetime.now() - markzeroTime
    # APPLY LAST SCORE TO STRATEGIES;
    ranker.loadStrategyRankings()

    def makeBalanceScore(entry):
        # Balance normalized by average market price, so scores are
        # comparable across different market conditions.
        return (float(entry['BALANCE']) /
                float(entry['AVERAGE_PRICE']))

    pastCorrespondingScore = None
    for row in Balances:
        balanceDate = dateparser.parse(row['TIME'])
        timeDelta = predictedStartTime - balanceDate
        # Bug fix: `.seconds` drops the day component and misbehaves for
        # negative deltas; total_seconds() with abs() measures the true
        # distance regardless of ordering.
        minuteDelta = abs(timeDelta.total_seconds()) / 60
        if minuteDelta < 60:
            pastCorrespondingScore = makeBalanceScore(row)
    if pastCorrespondingScore is not None:
        currentScore =\
            makeBalanceScore(currentPortfolioStatistics)
        botRunScore = currentScore / pastCorrespondingScore * 100
        normalizedBotRunScore = botRunScore / runningTimeHours
        runningStrategy = None
        # Identify which scoreboard strategy the running bot was using by
        # comparing its parameters with each strategy's TOML file.
        for Strategy in ranker.Strategies:
            equalStrats = True
            strategyParameters = pytoml.load(open(
                getParameterSettingsPath(Strategy.parameters)))
            print(runningBotStrategies[-1])
            # NOTE(review): elements of runningBotStrategies are method-name
            # strings as produced by interpreteRunningBotStatistics — the
            # ['params'] lookup below assumes a richer structure; confirm.
            comparateParameters =\
                runningBotStrategies[-1]['params']
            for param in comparateParameters.keys():
                # Bug fix: the original tested `type(param) == dict`, but
                # dict keys are strings, so nested parameter groups were
                # never skipped as evidently intended.
                if isinstance(comparateParameters[param], dict):
                    continue
                if param not in strategyParameters.keys():
                    equalStrats = False
                    break
                if strategyParameters[param] !=\
                        comparateParameters[param]:
                    equalStrats = False
                    break
            if equalStrats:
                runningStrategy = Strategy
                break
        if runningStrategy:
            print("Runnnig strategy found at scoreboard.")
            runningStrategy.profits.append(normalizedBotRunScore)
        else:
            print("Running strategy not found at scoreboard.")
    # WRITE NEW STRATEGY SCORES;
    ranker.saveStrategyRankings()
def checkGekkoRunningBots(exchange, ranker, options):
    """Main livetrader cycle: snapshot the portfolio balance, and either
    score + restart the running bots (when they ran long enough) or launch
    a fresh batch on an idle gekko instance.
    """
    runningBots = gekkoTrigger.getRunningGekkos()
    BalancesFields = ['TIME', 'BALANCE', 'AVERAGE_PRICE']
    selectorSigma = exchange.conf.strategySelectorSigma
    allPairs = exchange.getAssets()
    assetCurrencyPairs = exchange.parseAssets(allPairs)
    # Read the existing balance history (if any)...
    try:
        Balances = csv.DictReader(open('balances.csv'))
    except FileNotFoundError:
        print("Balances file not found.")
        Balances = []
    Balances = [row for row in Balances]
    # ...then rewrite the file from scratch and append the new snapshot.
    # NOTE(review): file handles opened here are never explicitly closed.
    wBalances = csv.DictWriter(open('balances.csv', 'w'),
                               fieldnames=BalancesFields)
    wBalances.writeheader()
    for N in Balances:
        wBalances.writerow(N)
    currentPortfolioValue = exchange.getUserBalance()
    print("Net weight %.2f USD" % currentPortfolioValue)
    currentPortfolioStatistics = {
        'TIME': str(datetime.datetime.now()),
        'BALANCE': currentPortfolioValue,
        'AVERAGE_PRICE': exchange.getAveragePrices()
    }
    wBalances.writerow(currentPortfolioStatistics)
    if runningBots:
        runningTimes, runningBotStrategies =\
            interpreteRunningBotStatistics(runningBots)
        if runningTimes and runningBotStrategies:
            averageRunningTime = sum(runningTimes) / len(runningTimes)
            runningTimeHours = averageRunningTime / 3600
            targetMinimumRunningHours =\
                exchange.conf.strategyRunTimePeriodHours
            # if target running time is reached;
            if runningTimeHours > targetMinimumRunningHours:
                # Score the finished run, stop everything, then relaunch
                # with a freshly selected strategy and asset set.
                operateStrategyScores(exchange, ranker,
                                      Balances, runningTimeHours,
                                      currentPortfolioStatistics,
                                      runningBotStrategies)
                Strategy = ranker.selectStrategyToRun(selectorSigma)
                stopGekkoBots()
                # Give gekko time to wind down before relaunching.
                time.sleep(60)
                selectedAssetCurrencyPairs = calculateMostIndicatedAssets(exchange)
                gekkoTrigger.launchBatchTradingBots(
                    selectedAssetCurrencyPairs,
                    [Strategy.strategy],
                    options
                )
            else:
                print("Target runtime not reached.")
    else:
        # No bots running: pick a strategy and start a new batch.
        ranker.loadStrategyRankings()
        print("Launching bots on idle gekko instance.")
        Strategy = ranker.selectStrategyToRun(selectorSigma)
        selectedAssetCurrencyPairs = calculateMostIndicatedAssets(exchange)
        print(assetCurrencyPairs)
        print(selectedAssetCurrencyPairs)
        gekkoTrigger.launchBatchTradingBots(
            selectedAssetCurrencyPairs,
            [Strategy.strategy],
            options
        )
================================================
FILE: livetrader/gekkoTrigger.py
================================================
#!/bin/python
import time
from evaluation.gekko.API import httpPost
from evaluation.gekko.dataset import epochToString
import requests
import json
from promoterz import TOMLutils
def runTradingBot(botSpecifications, Strategy, options, TradingBot=False):
    """Launch one gekko instance (paper trader or live tradebot) via the
    local gekko HTTP API, creating a market watcher first when needed.

    botSpecifications -- dict with STRATEGY/ASSET/CURRENCY/EXCHANGE keys.
    Strategy          -- strategy name; falls back to botSpecifications['STRATEGY'].
    options           -- command-line options (candleSize, alternativeParameters).
    TradingBot        -- True for a real-money tradebot, False for paper trading.

    Returns (Watcher, Trader) API responses; Watcher is None when an
    existing watcher was reused.
    """
    URL = "http://localhost:3000/api/startGekko"
    if not Strategy:
        Strategy = botSpecifications['STRATEGY']
    print("Starting bot running %s for %s/%s at %s." % (
        Strategy,
        botSpecifications['ASSET'],
        botSpecifications['CURRENCY'],
        botSpecifications['EXCHANGE']))
    traderParameters = {
        "tradingAdvisor": {
            "enabled": 'true',
            "method": Strategy,
            "candleSize": options.candleSize,
            "historySize": 40
        }
    }
    watchSettings = getWatchSettings(botSpecifications)
    traderParameters.update(getTraderBaseParameters())
    traderParameters.update(watchSettings)
    if TradingBot:
        traderParameters['type'] = "tradebot"
        traderParameters['trader'] = {'enabled': 'true'}
    else:
        traderParameters['type'] = "paper trader"
        traderParameters['paperTrader'] = {
            "feeMaker": 0.25,
            "feeTaker": 0.25,
            "feeUsing": "maker",
            "slippage": 0.05,
            "simulationBalance": {
                "asset": 0,
                "currency": 100
            },
            "reportRoundtrips": 'true',
            "enabled": 'true'
        }
    # Strategy parameters come from a TOML file; --alternativeParameters
    # overrides the default <Strategy>.toml file name.
    commonPath = 'strategy_parameters/%s.toml'
    if options.alternativeParameters:
        parameterPath = commonPath % options.alternativeParameters
    else:
        parameterPath = commonPath % Strategy
    strategySettings = TOMLutils.preprocessTOMLFile(
        parameterPath)
    strategySettings = TOMLutils.TOMLToParameters(strategySettings)
    traderParameters[Strategy] = strategySettings
    watcherSettings = getWatcherBaseParameters()
    watcherSettings.update(watchSettings)
    # Reuse an existing market watcher for this exchange/pair when possible.
    ExistingWatcher = checkWatcherExists(watchSettings)
    if not ExistingWatcher:
        print("Creating watcher for %s!" %
              watchSettings['watch']['exchange'])
        Watcher = httpPost(URL, watcherSettings)
        # Give the new watcher a moment to initialize before attaching.
        time.sleep(4)
    else:
        print("Watcher for %s-%s exists! Creating none." %
              (watchSettings['watch']['exchange'],
               watchSettings['watch']['asset']))
        Watcher = None
    # (fix: removed a stray no-op `traderParameters` expression statement)
    Trader = httpPost(URL, traderParameters)
    return Watcher, Trader
def getTraderBaseParameters():
    """Base gekko trader configuration shared by every spawned trader:
    leeches candles from a watcher, realtime mode, writers disabled."""
    muted = {
        "enabled": 'false',
        "muteSoft": 'false'
    }
    Request = {
        "market": {
            "type": "leech",
            "from": epochToString(time.time())
        },
        "mode": "realtime",
        "adviceWriter": dict(muted),
        "adviceLogger": dict(muted),
        "candleWriter": {
            "enabled": 'false',
            "adapter": "sqlite"
        },
        "type": "paper trader",
        "performanceAnalyzer": {
            "riskFreeReturn": 2,
            "enabled": 'false'
        },
        "valid": 'true'
    }
    return Request
def getWatchSettings(coinInfo):
    """Translate an {EXCHANGE, CURRENCY, ASSET} descriptor into the gekko
    'watch' configuration section (asset/currency upper-cased)."""
    return {
        "watch": {
            "exchange": coinInfo["EXCHANGE"],
            "currency": coinInfo["CURRENCY"].upper(),
            "asset": coinInfo["ASSET"].upper()
        }
    }
def checkWatcherExists(Watch):
    """Return the id of a running gekko watcher matching Watch's
    asset/currency/exchange, or False when none matches."""
    gekkoInstances = getRunningGekkos()
    target = Watch['watch']
    checkKeys = ['asset', 'currency', 'exchange']
    for instanceName in gekkoInstances.keys():
        instance = gekkoInstances[instanceName]
        if instance['type'] != 'watcher':
            continue
        watched = instance['config']['watch']
        if all(watched[key] == target[key] for key in checkKeys):
            return instance['id']
    return False
def getRunningGekkos():
    """Query the local gekko API for live instances; {} when gekko is down."""
    try:
        response = requests.get('http://localhost:3000/api/gekkos')
    except requests.exceptions.ConnectionError:
        print("Gekko is not running.")
        return {}
    return json.loads(response.text)['live']
def getWatcherBaseParameters():
    """Base configuration for a realtime market-watcher gekko instance."""
    return {
        "candleWriter": {
            "enabled": "false",
            "adapter": "sqlite"
        },
        "type": "market watcher",
        "mode": "realtime"
    }
def launchBatchTradingBots(assetCurrencyPairs, Stratlist, options):
    """Start a live trading bot for every (pair, strategy) combination."""
    for pair in assetCurrencyPairs:
        for strategy in Stratlist:
            runTradingBot(pair, strategy, options, TradingBot=True)
================================================
FILE: livetrader/japonicusResultSelector.py
================================================
#!/bin/python
import os
import csv
import shutil
import names
from . import exchangeMonitor
def readResultFolder(strategyName, runLogFolderPath, retrievalCount=1):
    """Scan one japonicus run-log folder for positive evaluation results and
    register the best one on the strategy scoreboard.

    Returns True when a result was registered, False otherwise.
    """
    evalBreaksLogFilename = os.path.join(runLogFolderPath, 'evaluation_breaks.csv')
    if not os.path.isfile(evalBreaksLogFilename):
        print("Evaluation break log file not found.")
        return False
    evalBreakLogs = open(evalBreaksLogFilename)
    evalBreakLogs = csv.DictReader(evalBreakLogs)
    positiveResults = []
    for result in evalBreakLogs:
        # Bug fix: DictReader yields strings; 'str > int' raises TypeError
        # on Python 3 and string '+' would concatenate — convert first.
        evaluationScore = float(result['evaluation'])
        secondaryScore = float(result['secondary'])
        if evaluationScore > 0 and secondaryScore > 0:
            if len(list(result.keys())) > 2:
                result['score'] = evaluationScore + secondaryScore
                positiveResults.append(result)
            else:
                print("Naive logging system detected, from older japonicus version.")
                print("Unable to check result file.")
    if not positiveResults:
        print("No positive results found!")
        return False
    positiveResults = sorted(positiveResults,
                             key=lambda r: r['score'], reverse=True)
    # Unique live-parameter name: strategy plus a random person name.
    parameterName = strategyName + names.get_full_name()
    R = positiveResults[0]
    stratPath = os.path.join(R['filepath'])
    # Bug fix: shutil.copy() was called with no arguments (TypeError);
    # copy the winning parameter file into the live parameter directory.
    # NOTE(review): destination assumed from gekkoChecker's
    # strategy_parameters/<name>.toml convention — confirm.
    shutil.copy(stratPath,
                os.path.join('strategy_parameters', parameterName + '.toml'))
    # NOTE(review): these ranking helpers are defined on strategyRanker,
    # not exchangeMonitor — this call fails as written; confirm the
    # intended module before relying on this path.
    strategyRankings = exchangeMonitor.loadStrategyRankings()
    newEntry = exchangeMonitor.strategyParameterSet(
        {
            'strategy': strategyName,
            'parameters': parameterName,
            'profits': []
        }
    )
    strategyRankings.append(newEntry)
    exchangeMonitor.saveStrategyRankings(strategyRankings)
    return True
def sweepLogFolder():
    """Scan every run folder under logs/ for usable evaluation results."""
    availableLogs = os.listdir('logs')
    for folder in availableLogs:
        print(folder)
        # NOTE(review): strategyName is left empty, so results register
        # under a generated person-name only — confirm intent.
        strategyName = ''
        # Bug fix: readResultFolder expects the folder *path*; the bare
        # folder name pointed outside logs/.
        readResultFolder(strategyName, os.path.join('logs', folder))
================================================
FILE: livetrader/strategyRanker.py
================================================
#!/bin/python
import json
import pytoml
import random
class strategyRanker():
    """Keeps a ranked collection of strategy parameter sets, persisted as
    JSON in gekkoStrategyRankings.json."""

    def __init__(self):
        self.Strategies = []

    def loadStrategyRankings(self):
        """Reload all strategy entries from disk.

        Fix: the rankings file handle was opened and never closed;
        a with-block now guarantees release."""
        with open("gekkoStrategyRankings.json") as rankingsFile:
            entries = json.load(rankingsFile)
        self.Strategies = [strategyParameterSet(entry) for entry in entries]

    def saveStrategyRankings(self):
        """Serialize all strategy entries back to disk (handle closed via with)."""
        outputList = [strategy.toJson() for strategy in self.Strategies]
        with open("gekkoStrategyRankings.json", 'w') as rankingsFile:
            json.dump(outputList, rankingsFile)

    def selectStrategyToRun(self, sigma=10):
        """Pick the next strategy to trade with: with probability sigma/100
        the best-scoring entry (exploitation), otherwise a uniformly random
        one (exploration)."""
        if random.random() < sigma / 100:
            return sorted(self.Strategies,
                          key=lambda s: s.getScore(), reverse=True)[0]
        return random.choice(self.Strategies)
class strategyParameterSet():
    """One scored scoreboard entry: strategy name, parameter-file name and
    the history of per-run profits."""

    def __init__(self, jsonData):
        # Attribute list doubles as the JSON schema for (de)serialization.
        self.Attributes = ['strategy', 'parameters', 'profits']
        self.fromJson(jsonData)

    def fromJson(self, jsonData):
        """Populate the schema attributes from a plain dict."""
        for attribute in self.Attributes:
            setattr(self, attribute, jsonData[attribute])

    def toJson(self):
        """Export the schema attributes as a plain dict."""
        return {attribute: getattr(self, attribute)
                for attribute in self.Attributes}

    def loadParameterSet(self):
        """Parse the TOML parameter file referenced by this entry."""
        self.parameterSet = pytoml.load(open(self.parameters))

    def getScore(self):
        """Mean profit over recorded runs; 0 when none are recorded yet."""
        if self.profits:
            return sum(self.profits) / len(self.profits)
        return 0
================================================
FILE: promoterz/README.md
================================================
A Python module specialized in genetic algorithms using various representations.
It is intended to evolve a dict of parameters, nested or not, where each parameter is provided with its respective range.
```
sampleParameters = {'short': (6,8),
'persist': (1,50),
'variableY': (2,6),
'ROP_weight': (5,7),
'santa': (1,10),
'thresholds': {
'top': (8,11),
'bottom': (17,32)
},
'IL12': (3,8)}
```
================================================
FILE: promoterz/TOMLutils.py
================================================
#!/bin/python
import re
import pytoml
def preprocessTOMLFile(filepath):
    """Open a TOML parameter file for parsing.

    Returns the open file object; the caller is responsible for closing it
    (typically by handing it to TOMLToParameters).
    """
    return open(filepath)
def TOMLToParameters(TOMLDATA):
    """Parse an open TOML file object into a parameters dict.

    NOTE(review): the loop below only rebinds the *local* name `Parameter`;
    it never modifies the `Parameters` dict, so the '='-stripping and float
    conversion have no effect on the returned value (and float() would raise
    ValueError if a stripped key were non-numeric). The intent is unclear —
    confirm against callers before fixing or removing.
    """
    Parameters = pytoml.load(TOMLDATA)
    for Parameter in Parameters.keys():
        if type(Parameter) == str:
            if '=' in Parameter:
                Parameter = Parameter.replace('=', '')
                Parameter = float(Parameter)
    return Parameters
def parametersToTOML(Settings):
    """Serialize a settings dict to TOML text."""
    return pytoml.dumps(Settings)
================================================
FILE: promoterz/__init__.py
================================================
#!/bin/python
from .import functions
from .import supplement, validation, parameterOperations
from .import evolutionHooks
from .import world, locale
from .import evaluationPool
from .import logger
from .import metaPromoterz
from .import sequence
from .import webServer
from .import TOMLutils
from .import evaluationBreak
================================================
FILE: promoterz/environment.py
================================================
#!/bin/python
class Environment():
    """NOTE(review): skeleton class — `propertyGenenerator` (sic) is
    accepted but never stored or used, and `self.w` is only a placeholder
    (presumably for a World instance — confirm before use)."""

    def __init__(self, propertyGenenerator):
        self.w = None
================================================
FILE: promoterz/evaluationBreak.py
================================================
#!/bin/python
import random
import json
import csv
from deap import tools
import promoterz
import evaluation
from . import TOMLutils
def showResults(World):
    """Evaluation break: validate the best individuals of every locale
    against randomly sampled dataset sectors, log/save viable parameter
    sets, and update the global evaluation statistics + web view.
    """
    validationDatasets = []
    # IS EVALUATION DATASET LOADED? USE IT;
    if World.EnvironmentParameters['evaluation']:
        useSecondary = 'evaluation'
    else:
        useSecondary = 'evolution'
    # LOAD EVALUATION DATASET;
    sourceDataset = random.choice(World.EnvironmentParameters[useSecondary])
    getter = evaluation.gekko.datasetOperations.getRandomSectorOfDataset
    # Draw proofSize random sectors to validate against.
    for NB in range(World.conf.evalbreak.proofSize):
        newDataset = getter(sourceDataset, World.conf.backtest.deltaDays)
        validationDatasets.append(newDataset)
    for LOCALE in World.locales:
        # Only individuals with a valid fitness can be ranked.
        LOCALE.population = [ind for ind in LOCALE.population
                             if ind.fitness.valid]
        # SELECT BEST INDIVIDUALS;
        B = World.conf.evalbreak.NBBESTINDS
        BestIndividues = tools.selBest(LOCALE.population, B)
        Z = min(World.conf.evalbreak.NBADDITIONALINDS,
                len(LOCALE.population) - B)
        Z = max(0, Z)
        # SELECT ADDITIONAL INDIVIDUALS;
        AdditionalIndividues = promoterz.evolutionHooks.Tournament(
            LOCALE.population, Z, Z * 2
        )
        print("%i selected;" % len(AdditionalIndividues))
        # Avoid re-testing individuals already in the best set.
        AdditionalIndividues = [
            x for x in AdditionalIndividues if x not in BestIndividues
        ]
        setOfToEvaluateIndividues = BestIndividues + AdditionalIndividues
        print("%i selected;" % len(setOfToEvaluateIndividues))
        print("Selecting %i+%i individues, random test;" % (B, Z))
        currentSessionBreakResults = []
        # EVALAUTE EACH SELECTED INDIVIDUE;
        for FinalIndividue in setOfToEvaluateIndividues:
            GlobalLogEntry = {}
            proof = stratSettingsProofOfViability
            AssertFitness, FinalProfit, Results = proof(
                World, FinalIndividue, validationDatasets
            )
            LOCALE.lastEvaluation = FinalProfit
            GlobalLogEntry['evaluation'] = FinalProfit
            World.logger.log(
                "\n\n\nTesting Strategy of %s @ EPOCH %i:\n" % (
                    LOCALE.name,
                    LOCALE.EPOCH)
            )
            for R, Result in enumerate(Results):
                World.logger.log(
                    evaluation.gekko.showBacktestResult(Result,
                                                        validationDatasets[R]) + '\n')
            World.logger.log(
                '\nRelative profit on evolution dataset: %.3f' %
                FinalProfit)
            # Viability: all sectors positive, or a large total profit.
            if AssertFitness or FinalProfit > 50:
                World.logger.log("Current parameters are viable.")
            else:
                World.logger.log("Current parameters fails.")
                if not World.conf.Global.showFailedStrategies:
                    World.logger.log(
                        "Skipping further tests on current parameters.",
                        show=False)
                    continue
            FinalIndividueSettings = World.tools.constructPhenotype(
                FinalIndividue)
            # -- PREFETCH TOMLSettings;
            TOMLSettings = TOMLutils.parametersToTOML(
                FinalIndividueSettings
            )
            # --EVALUATION DATASET TEST AND REPORT;
            if World.EnvironmentParameters['evaluation']:
                evalDataset = random.choice(
                    World.EnvironmentParameters['evaluation'])
                evalDataset = getter(evalDataset, 0)
                secondaryResults = World.parallel.evaluateBackend(
                    [evalDataset], 0, [FinalIndividue]
                )
                print()
                # print(secondaryResults)
                backtestResult = secondaryResults[0][0]
                World.logger.log(
                    "Relative profit on evaluation dataset: \n\t%s" %
                    evaluation.gekko.showBacktestResult(backtestResult))
                LOCALE.lastEvaluationOnSecondary =\
                    backtestResult['relativeProfit']
                GlobalLogEntry['secondary'] =\
                    backtestResult['relativeProfit']
                currentSessionBreakResults.append((backtestResult['relativeProfit'],
                                                   TOMLSettings))
            else:
                print("Evaluation dataset is disabled.")
            # LOG AND SHOW PARAMETERS;
            Show = json.dumps(FinalIndividueSettings, indent=2)
            print("~" * 18)
            World.logger.log(" %.3f final profit ~~~~" % FinalProfit)
            print(" -- Settings for Gekko config.js -- ")
            World.logger.log(Show)
            print(" -- Settings for Gekko --ui webpage -- ")
            World.logger.log(TOMLSettings)
            paramsFilename = "%s-EPOCH%i" % (LOCALE.name,
                                             LOCALE.EPOCH)
            World.logger.saveParameters(paramsFilename, TOMLSettings)
            # NOTE(review): logged as 'filename', but
            # japonicusResultSelector.readResultFolder reads 'filepath'
            # from this CSV — confirm which key is intended.
            GlobalLogEntry['filename'] = paramsFilename
            print("\nRemember to check MAX and MIN values for each parameter.")
            print("\tresults may improve with extended ranges.")
            World.EvaluationStatistics.append(GlobalLogEntry)
    # SAVE GLOBAL EVALUATION LOGS;
    evaluationBreaksFilename = 'logs/evaluation_breaks.csv'
    if World.EvaluationStatistics:
        fieldnames = list(World.EvaluationStatistics[0].keys())
        with open(evaluationBreaksFilename, 'w') as f:
            GlobalEvolutionSummary = csv.DictWriter(f, fieldnames)
            GlobalEvolutionSummary.writeheader()
            World.logger.log('\t'.join(GlobalEvolutionSummary.fieldnames),
                             target="Summary",
                             show=False, replace=True)
            for n in World.EvaluationStatistics:
                GlobalEvolutionSummary.writerow(n)
        # Re-read the CSV to mirror it into the Summary log section.
        with open(evaluationBreaksFilename) as f:
            GlobalEvolutionSummary = csv.DictReader(f)
            for row in GlobalEvolutionSummary:
                World.logger.log('\t'.join([row[x] for x in row.keys()]),
                                 target="Summary",
                                 show=False, replace=False)
        World.logger.updateFile()
    # UPDATE WEB SERVER VISUALIZATION;
    if World.web:
        World.web.updateEvalBreakGraph(World.web, World.EvaluationStatistics)
        World.web.resultParameters += currentSessionBreakResults
def stratSettingsProofOfViability(World, Individual, Datasets):
    """Backtest Individual over several validation datasets.

    Returns (Valid, testMoney, results):
      Valid     -- True when every dataset yields a positive relative profit.
      testMoney -- sum of relative profits over all datasets.
      results   -- the raw per-dataset backtest result dicts.

    Bug fix: viability used to compare the *sum* of the positive profits
    with the *count* of datasets (`sum(check) == len(AllProofs)`), which
    is only accidentally true; it now compares counts.
    """
    AllProofs = []
    Results = World.parallel.evaluateBackend(Datasets, 0, [Individual])
    for W in Results[0]:
        AllProofs.append(W['relativeProfit'])
    testMoney = sum(AllProofs)
    check = [x for x in AllProofs if x > 0]
    Valid = len(check) == len(AllProofs)
    return Valid, testMoney, Results[0]
================================================
FILE: promoterz/evaluationPool.py
================================================
#!/bin/python
import time
import random
import itertools
from multiprocessing import Pool, TimeoutError
from multiprocessing.pool import ThreadPool
class EvaluationPool():
    """Distributes individual evaluation over one local + N remote backtest
    endpoints (one URL each), tracking per-machine timing so dead remotes
    can be detected via timeouts and their work redone locally.
    """

    def __init__(self,
                 World,
                 Urls, poolsize, individual_info):
        self.World = World
        self.Urls = Urls
        # Per-URL timing from the previous round; drives timeout estimates.
        self.lasttimes = [0 for x in Urls]
        self.lasttimesperind = [0 for x in Urls]
        self.poolsizes = [poolsize for x in Urls]
        # When True, print a per-individual summary as results arrive.
        self.individual_info = individual_info

    def evaluateBackend(self, datasets, I, inds):
        """Evaluate every (dataset, individual) pair on machine I.

        Returns (fitnesses, delta_time): one result dict per product entry
        plus the wall-clock duration of the whole batch.
        """
        stime = time.time()
        dateInds = list(itertools.product(datasets, inds))
        # print(list(dateInds))
        Q = [
            ([dataset], Ind, self.Urls[I])
            for dataset, Ind in dateInds
        ]
        P = Pool(self.poolsizes[I])
        fitnesses = P.starmap(self.World.tools.Evaluate, Q)
        P.close()
        P.join()
        delta_time = time.time() - stime
        return fitnesses, delta_time

    def evaluatePopulation(self, locale):
        """Evaluate all not-yet-valid individuals of a locale across all
        machines; work from timed-out remotes is redone locally.

        Returns (number_evaluated, average_trade_count).
        """
        individues_to_simulate = [
            ind for ind in locale.population if not ind.fitness.valid
        ]
        # NOTE(review): distributeIndividuals is not defined in this class
        # as shown here — confirm it is provided elsewhere.
        props = self.distributeIndividuals(individues_to_simulate)
        args = [
            [
                locale.Dataset,
                I,
                props[I],
            ]
            for I in range(len(self.Urls))
        ]
        # One thread per machine; each thread runs one evaluateBackend batch.
        pool = ThreadPool(len(self.Urls))
        results = []
        try:
            for A in args:
                results.append(pool.apply_async(self.evaluateBackend, A))
            pool.close()
        except (SystemExit, KeyboardInterrupt):
            print("Aborted by user.")
            exit(0)
        TimedOut = []
        for A in range(len(results)):
            try:
                # Allow 3x the previous per-individual time (12s default)
                # per queued individual.
                perindTime = 3 * self.lasttimesperind[A]\
                    if self.lasttimesperind[A] else 12
                timeout = perindTime * len(props[A])\
                    if A else None # no timeout for local machine;
                results[A] = results[A].get(timeout=timeout)
            except TimeoutError: # Timeout: remote machine is dead;
                print("Machine timeouts!")
                args[A][1] = 0 # Set to evaluate @ local machine
                results[A] = self.evaluateBackend(* args[A])
                TimedOut.append(A)
        pool.join()
        TotalNumberOfTrades = 0
        for PoolIndex in range(len(results)):
            for i, fit in enumerate(results[PoolIndex][0]):
                if self.individual_info:
                    print(self.World.tools.showIndividue(fit))
                # Write the backtest result onto the matching individual.
                self.World.tools.ApplyResult(fit, props[PoolIndex][i])
                TotalNumberOfTrades += fit['trades']
            self.lasttimes[PoolIndex] = results[PoolIndex][1]
            L = len(props[PoolIndex])
            self.lasttimesperind[PoolIndex] =\
                self.lasttimes[PoolIndex] / L if L else 5
        # Every requested individual must now carry a valid fitness.
        F = [x.fitness.valid for x in individues_to_simulate]
        assert (all(F))
        for T in TimedOut:
            # NOTE(review): ejectURL is not defined in this class as shown
            # here — a timeout would raise AttributeError; confirm it is
            # provided elsewhere.
            self.ejectURL(T)
        N = len(individues_to_simulate)
        # RECORD NUMBER OF EVALUATIONS;
        locale.World.totalEvaluations += N
        # CALCULATE AVERAGE TRADE NUMBER;
        averageTrades = TotalNumberOfTrades / max(1, N)
        return N, averageTrades
================================================
FILE: promoterz/evolutionHooks.py
================================================
#!/bin/python
from deap import base, tools
from copy import deepcopy
import random
import promoterz.supplement.age
import promoterz.supplement.PRoFIGA
import promoterz.supplement.phenotypicDivergence
import itertools
# population as last positional argument, to blend with toolbox;
def immigrateHoF(HallOfFame, population):
    """Inject one deep-copied hall-of-fame member into the population.

    The clone's fitness values are deleted so it is re-evaluated in the
    current context. No-op when the hall of fame is empty.
    """
    if not HallOfFame.items:
        return population
    migrant = deepcopy(random.choice(HallOfFame))
    del migrant.fitness.values
    population.append(migrant)
    return population
def immigrateRandom(populate, nb_range, population):  # (populate function)
    """Extend the population in place with a random number of freshly
    generated individuals; the count is drawn uniformly from nb_range."""
    count = random.randint(*nb_range)
    population.extend(populate(count))
    return population
def filterAwayWorst(population, N=5):
    """Return the population with its N worst-scoring individuals removed.

    Bug fix: the survivor count was computed with a hard-coded 5, silently
    ignoring the N parameter.
    """
    aliveSize = len(population) - N
    population = tools.selBest(population, aliveSize)
    return population
def filterAwayThreshold(locale, Threshold, min_nb_inds):
    """Drop individuals whose primary fitness value is not above Threshold,
    keeping at least min_nb_inds in the locale (via populationFilter).

    Idiom fix: the predicate was a lambda bound to a name (PEP 8 E731).
    """
    def thresholdFilter(ind):
        return ind.fitness.values[0] > Threshold

    populationFilter(locale, thresholdFilter, min_nb_inds)
def filterAwayTradeCounts(locale, ThresholdRange, min_nb_inds):
    """Drop individuals whose trade count falls outside ThresholdRange
    (inclusive bounds), keeping at least min_nb_inds in the locale."""
    low, high = ThresholdRange

    def tradecountFilter(ind):
        return low <= ind.trades <= high

    populationFilter(locale, tradecountFilter, min_nb_inds)
def filterAwayRoundtripDuration(locale, ThresholdRange, min_nb_inds):
    """Drop individuals whose average exposure (hours) falls outside
    ThresholdRange (inclusive bounds), keeping at least min_nb_inds."""
    low, high = ThresholdRange

    def roundtripDurationFilter(ind):
        return low <= ind.averageExposure <= high

    populationFilter(locale, roundtripDurationFilter, min_nb_inds)
def populationFilter(locale, filterFunction, min_nb_inds):
    """Keep only individuals passing filterFunction, but restore randomly
    chosen removed ones so the locale keeps at least min_nb_inds members.

    Bug fix: the restore count was previously derived from the *pre-filter*
    population size (min(min_nb_inds - len(population), min_nb_inds)),
    which is never positive once the population is at least min_nb_inds,
    so the minimum-size guarantee never triggered.
    """
    newPopulation = [
        ind for ind in locale.population if filterFunction(ind)
    ]
    removed = [ind for ind in locale.population if ind not in newPopulation]
    # How many individuals must be restored to honor the minimum size.
    NBreturn = min_nb_inds - len(newPopulation)
    NBreturn = max(0, NBreturn)
    if NBreturn and removed:
        for k in range(NBreturn):
            if removed:
                newPopulation.append(removed.pop(random.randrange(0,
                                                                  len(removed))))
    locale.population = newPopulation
def evaluatePopulation(locale):
    """Evaluate every individual of the locale whose fitness is not yet
    valid, assigning the computed fitness values in place.

    Returns the number of individuals evaluated.
    """
    pending = [ind for ind in locale.population
               if not ind.fitness.valid]
    fitnesses = locale.World.parallel.starmap(
        locale.extratools.Evaluate, zip(pending)
    )
    for individual, fit in zip(pending, fitnesses):
        individual.fitness.values = fit
    return len(pending)
def getLocaleEvolutionToolbox(World, locale):
    """Build the per-locale DEAP toolbox: immigration, population filters,
    age/divergence bookkeeping and the evaluation hook, each pre-bound to
    this locale via partial application in register()."""
    toolbox = base.Toolbox()
    toolbox.register("ImmigrateHoF", immigrateHoF, locale.HallOfFame)
    toolbox.register("ImmigrateRandom", immigrateRandom, World.tools.population)
    toolbox.register("filterThreshold", filterAwayThreshold, locale)
    toolbox.register("filterTrades", filterAwayTradeCounts, locale)
    toolbox.register("filterExposure", filterAwayRoundtripDuration, locale)
    toolbox.register('ageZero', promoterz.supplement.age.ageZero)
    toolbox.register(
        'populationAges',
        promoterz.supplement.age.populationAges,
        World.conf.generation.ageBoundaries,
    )
    toolbox.register(
        'populationPD',
        promoterz.supplement.phenotypicDivergence.populationPhenotypicDivergence,
        World.tools.constructPhenotype,
    )
    toolbox.register('evaluatePopulation', evaluatePopulation)
    return toolbox
def getGlobalToolbox(representationModule):
    # GLOBAL FUNCTION TO GET GLOBAL TBX UNDER DEVELOPMENT;
    # NOTE(review): this function cannot run as written — `creator`,
    # `genconf`, `Attributes`, `initPromoterMap` and `initInd` are not
    # defined or imported in this module (only `base` and `tools` are
    # imported from deap). Calling it raises NameError; treat as a draft.
    toolbox = base.Toolbox()
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create(
        "Individual",
        list,
        fitness=creator.FitnessMax,
        PromoterMap=None,
        Strategy=genconf.Strategy,
    )
    toolbox.register("mate", representationModule.crossover)
    toolbox.register("mutate", representationModule.mutate)
    PromoterMap = initPromoterMap(Attributes)
    toolbox.register("newind", initInd, creator.Individual, PromoterMap)
    toolbox.register("population", tools.initRepeat, list, toolbox.newind)
    toolbox.register("constructPhenotype", representationModule.constructPhenotype)
    return toolbox
def getFitness(individual):
    """Return the sum of the individual's weighted fitness values.

    Bug fix: the original read `individual.wvalues` (the weighted values
    live on `individual.fitness`, as the sibling selectCriteria shows)
    and never returned the computed sum.
    """
    return sum(individual.fitness.wvalues)
def selectCriteria(ind):
    """Scalar ranking score for an individual: the sum of its weighted
    fitness values."""
    total = 0
    for value in ind.fitness.wvalues:
        total += value
    return total


def selBest(individuals, number):
    """Return the top `number` individuals, highest score first."""
    return sorted(individuals, key=selectCriteria, reverse=True)[:number]
def Tournament(individuals, finalselect, tournsize):
    """Tournament selection: run `finalselect` tournaments of `tournsize`
    randomly sampled aspirants each, keeping every tournament's winner.

    Bug fix: the winner was previously chosen with max(individuals, ...) —
    i.e. the global best every time — instead of max over the sampled
    aspirants, which defeated the purpose of the tournament.
    """
    chosen = []
    for i in range(finalselect):
        aspirants = tools.selRandom(individuals, tournsize)
        chosen.append(max(aspirants, key=selectCriteria))
    return chosen
================================================
FILE: promoterz/evolutionToolbox.py
================================================
#!/bin/python
from deap import base
def getExtraTools(HallOfFame, W):
    # NOTE(review): unfinished stub — DEAP's register() requires a callable
    # after the alias, so this call raises TypeError, and the toolbox is
    # never returned. Confirm the intended registrations before use.
    T = base.Toolbox()
    T.register('q')
================================================
FILE: promoterz/functions.py
================================================
#!/bin/python
import random
from deap import base
from deap import creator
from deap import tools
from copy import deepcopy
import importlib
def PrepareAndEvaluate(constructPhenotype, evaluationMethod, Individual):
    """Build the phenotype for Individual, then score it with the given
    evaluation method; returns the evaluation result."""
    return evaluationMethod(constructPhenotype(Individual))
def selectRepresentationMethod(methodname):
    """Dynamically import and return the promoterz.representation
    submodule named `methodname` (e.g. 'chromosome', 'oldschool')."""
    modulePath = "promoterz.representation.%s" % methodname
    return importlib.import_module(modulePath)
================================================
FILE: promoterz/locale.py
================================================
#!/bin/python
from deap import tools
from . import evolutionHooks
from . import statistics
class Locale():
    """One evolving sub-population ("island") of the world: holds its own
    population, hall of fame, statistics and generation loop."""

    def __init__(self, World, name, position, loop):
        self.World = World
        self.name = name
        # Number of completed generation loops.
        self.EPOCH = 0
        self.position = position
        self.EvolutionStatistics = []
        self.HallOfFame = tools.HallOfFame(30)
        # Locale-bound DEAP toolbox (immigration, filters, evaluation hook).
        self.extratools = evolutionHooks.getLocaleEvolutionToolbox(
            World, self
        )
        # GENERATION METHOD SELECTION;
        # to easily employ various GA algorithms,
        # this base EPOCH processor loads a GenerationMethod file,
        # which should contain a genToolbox function to generate
        # fully working DEAP toolbox, and a reconstructTradeSettings
        # function to convert parameters from individue to usable strategy Settings;
        # Check promoterz/representation;
        #genconf.Strategy = Strategy # ovrride strat defined on settings if needed;
        # --initial population
        self.population = World.tools.population(World.conf.generation.POP_SIZE)
        # Results of the most recent evaluation break (primary/secondary dataset).
        self.lastEvaluation = None
        self.lastEvaluationOnSecondary = None
        # --INIT STATISTICS;
        self.stats = statistics.getStatisticsMeter()
        self.InitialBestScores, self.FinalBestScores = [], []
        self.POP_SIZE = World.conf.generation.POP_SIZE
        # The generation-loop callable: invoked once per EPOCH by run().
        self.loop = loop

    def run(self):
        # Execute one full generation loop and advance the epoch counter.
        print(self.name)
        self.loop(self.World, self)
        self.EPOCH += 1
================================================
FILE: promoterz/logAnalysis.py
================================================
================================================
FILE: promoterz/logger.py
================================================
#!/bin/python
import datetime
import os
import csv
class Logger():
    """Accumulates log text in three sections (Header, Summary, Body) and
    lazily creates logs/<name>/ on the first Body write, so empty runs
    leave no directories behind.

    Fix: removed an unused local (`date = datetime.datetime.now()`) from
    the constructor.
    """

    def __init__(self, logfilename):
        # Ensure the top-level logs directory exists up front.
        if not os.path.isdir('logs'):
            os.mkdir('logs')
        self.logfilename = logfilename
        self.Header = ""
        self.Summary = ""
        self.Body = ""
        # Becomes True once the run-specific log directory has been created.
        self.Online = False

    def log(self, message, target="Body", show=True, replace=False):
        """Append (or replace) `message` in the given section; echoes to
        stdout unless show=False. A first Body write creates the run's
        log directories."""
        if target == "Body":
            # now the log has value to be written.
            if not self.Online:
                os.mkdir('logs/%s' % self.logfilename)
                os.mkdir('logs/%s/results' % self.logfilename)
                self.Online = True
        if replace:
            self.__dict__[target] = message
        else:
            self.__dict__[target] += message + '\n'
        if show:
            print(message)

    def updateFile(self):
        """Rewrite japonicus.log from the three sections; no-op while the
        run directory has not been created yet."""
        if not self.Online:
            return
        File = open('logs/%s/japonicus.log' % self.logfilename, 'w')
        for segment in [self.Header, self.Summary, self.Body]:
            File.write(segment + '\n')
        File.close()

    def write_evolution_logs(self, i, stats, localeName):
        """Dump per-generation statistics rows to <locale>.csv.

        NOTE(review): parameter `i` is unused; kept for caller compatibility.
        """
        filename = "logs/%s/%s.csv" % (self.logfilename, localeName)
        if stats:
            fieldnames = list(stats[0].keys())
            with open(filename, 'w') as f:
                df = csv.DictWriter(f, fieldnames)
                df.writeheader()
                df.writerows(stats)

    def saveParameters(self, filename, content):
        """Write a TOML parameter string into the run's results directory."""
        filename = "logs/%s/results/%s.toml" % (self.logfilename, filename)
        File = open(filename, 'w')
        File.write(content)
        File.close()
================================================
FILE: promoterz/metaPromoterz.py
================================================
#!/bin/python
# this file contains functions for 'meta genetic algorithm',
# this acts to allow settings value manipulation via command line,
# making possible a simple GA of GAs under bash.
# TBD
from .parameterOperations import flattenParameters, expandNestedParameters
def generateCommandLineArguments(parser, settings):
    """Expose every scalar setting as an optparse --a.b.c command-line option.

    List/bool/tuple settings are skipped (optparse cannot express them here);
    None-valued settings are treated as strings.

    Bug fix: optparse's valid type names are 'string', 'int', 'long', 'float',
    'complex', 'choice' — passing type='str' (the Python type name) raises
    OptionError, so any string or None setting used to crash this function.
    """
    flatSettings = flattenParameters(settings)
    for Setting in flatSettings.keys():
        if type(flatSettings[Setting]) in [list, bool, tuple]:
            pass
        else:
            originalValue = flatSettings[Setting]
            parameterType = type(originalValue)
            if parameterType.__name__ == 'NoneType':
                parameterType = str
            typeName = parameterType.__name__
            if typeName == 'str':
                # optparse calls the str type 'string'.
                typeName = 'string'
            parser.add_option("--%s" % Setting,
                              dest=Setting,
                              type=typeName,
                              default=originalValue)
    return parser
def applyCommandLineOptionsToSettings(options, settings):
    """Overlay parsed command-line option values onto the flattened settings,
    then rebuild and return the nested settings structure."""
    flatSettings = flattenParameters(settings)
    parsedValues = options.__dict__
    for key in flatSettings.keys():
        if key in parsedValues.keys():
            flatSettings[key] = parsedValues[key]
    return expandNestedParameters(flatSettings)
================================================
FILE: promoterz/parameterOperations.py
================================================
#!/bin/python
def flattenParameters(Parameters):
    """Flatten a nested dict into a single level with dot-joined keys.

    {'a': {'b': 1}, 'c': 2}  ->  {'a.b': 1, 'c': 2}

    The inner walker was previously named ``iter``, shadowing the builtin;
    renamed to ``_walk``.
    """
    result = {}

    def _walk(D, path=[]):
        # The mutable default is safe: `path + [q]` always builds a new list.
        for q in D.keys():
            if type(D[q]) == dict:
                _walk(D[q], path + [q])
            else:
                path_keyname = ".".join(path + [q])
                result.update({path_keyname: D[q]})

    _walk(Parameters)
    return result
def expandNestedParameters(Parameters):
    """Inverse of flattenParameters: expand dot-joined keys into nested dicts.

    {'a.b': 1, 'e': 3}  ->  {'a': {'b': 1}, 'e': 3}
    """
    expanded = {}
    for key, value in Parameters.items():
        if '.' not in key:
            expanded[key] = value
            continue
        parts = key.split('.')
        node = expanded
        # Walk/create intermediate dicts down to the leaf's parent.
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = value
    return expanded
def parameterValuesToRangeOfValues(TargetParameters, Spread):
    """Replace each scalar parameter value in place with a (low, high) range
    spanning `Spread` percent around it; existing tuples/lists are untouched.
    Returns the same (mutated) dict."""
    for name in TargetParameters.keys():
        value = TargetParameters[name]
        if type(value) in (tuple, list):
            continue  # already a range
        halfwidth = Spread * value / 200
        if value < 0:
            # Keep the half-width oriented so the tuple stays (low, high).
            halfwidth = -halfwidth
        TargetParameters[name] = (value - halfwidth, value + halfwidth)
    return TargetParameters
================================================
FILE: promoterz/representation/Creator.py
================================================
#!/bin/python
from .import deapCreator as creator
from deap import base
def init(fitness, extraParameters):
    """Register the FitnessMax and Individual classes on the DEAP-style creator.

    Individual is a list subclass carrying `fitness` plus any extra class
    attributes supplied by the caller. Returns the creator module itself.
    """
    creator.create("FitnessMax", fitness, weights=(1.0, 1))
    individualAttributes = dict(extraParameters)
    creator.create("Individual", list,
                   fitness=creator.FitnessMax,
                   **individualAttributes)
    return creator
================================================
FILE: promoterz/representation/chromosome.py
================================================
#!/bin/python
from deap import base
from deap import tools
from copy import deepcopy
import random
from . .import parameterOperations
from .import Creator
getPromoterFromMap = lambda x: [x[z] for z in list(x.keys())]
def constructPhenotype(stratSettings, chrconf, Individue):
    """Decode an individual's chromosomes into a nested settings dict (its phenotype).

    Each chromosome is scanned base by base; when a promoter value is found,
    the following bases are summed and scaled onto that parameter's
    (low, high) limits from `stratSettings`.
    """
    Settings = {}
    # NOTE(review): the read window below is fixed at 2 bases, while the
    # scaling lambda R divides by chrconf['GeneSize'] — confirm whether these
    # are meant to differ or GeneSize should come from chrconf here too.
    GeneSize = 2
    # Scale a summed read value V linearly onto the parameter limits `lim`.
    R = lambda V, lim: (lim[1] - lim[0]) * V / (33 * chrconf['GeneSize']) + lim[0]
    # Invert the promoter map: promoter base value -> parameter name.
    PromotersPath = {v: k for k, v in Individue.PromoterMap.items()}
    # print(PromotersPath)
    #print(Individue[:])
    Promoters = list(PromotersPath.keys())
    for C in Individue:
        for BP in range(len(C)):
            if C[BP] in Promoters:
                # Read the bases right after the promoter; only plain base
                # values (< 33) contribute to the parameter value.
                read_window = C[BP + 1: BP + 1 + GeneSize]
                read_window = [V for V in read_window if type(V) == int and V < 33]
                Value = sum(read_window)
                ParameterName = PromotersPath[C[BP]]
                Value = R(Value, stratSettings[ParameterName])
                Settings[ParameterName] = Value
    # Expand dotted parameter names ('a.b') back into nested dicts.
    _Settings = parameterOperations.expandNestedParameters(Settings)
    return _Settings
def getToolbox(Strategy, genconf, Attributes):
    """Assemble the DEAP toolbox for the chromosome (promoter-based) representation.

    Registers mate/mutate operators, an individual factory bound to a freshly
    generated promoter map, a population factory, and the phenotype decoder.
    """
    toolbox = base.Toolbox()
    # Individuals are list subclasses carrying their promoter map and strategy name.
    creator = Creator.init(base.Fitness, {'promoterMap': None, 'Strategy': Strategy})
    # creator.create("FitnessMax", base.Fitness, weights=(1.0, 3))
    toolbox.register("mate", pachytene)
    toolbox.register("mutate", mutate)
    # One shared promoter map for every individual created by this toolbox.
    PromoterMap = initPromoterMap(Attributes)
    toolbox.register(
        "newind", initInd, creator.Individual, PromoterMap, genconf.chromosome
    )
    toolbox.register("population", tools.initRepeat, list, toolbox.newind)
    toolbox.register(
        "constructPhenotype", constructPhenotype, Attributes, genconf.chromosome
    )
    return toolbox
def initPromoterMap(ParameterRanges):
    """Assign every strategy parameter a unique promoter base value in [120, 240).

    Returns a dict mapping parameter name -> promoter value; promoter values
    are deliberately disjoint from plain base values (< 33).
    """
    parameterNames = [name for name in list(ParameterRanges.keys())]
    candidateValues = list(range(120, 240))
    random.shuffle(candidateValues)
    chosenValues = [candidateValues.pop() for _ in parameterNames]
    promoterMap = dict(zip(parameterNames, chosenValues))
    # Every parameter must have received a distinct promoter.
    assert (len(parameterNames) == len(list(promoterMap.keys())))
    return promoterMap
def initChromosomes(PromoterMap, chrconf):
    """Scatter the promoters randomly across chromosomes, padding each gene
    with GeneSize random base values in [0, 33).

    The chromosome count is derived from the promoter count and the configured
    Density; placement keeps drawing until every promoter has been used.
    """
    # Inlined former getPromoterFromMap helper: take the promoter values.
    remaining = deepcopy([PromoterMap[key] for key in list(PromoterMap.keys())])
    chromosomeCount = round(len(remaining) / chrconf['Density']) + 1
    chromosomes = [[] for _ in range(chromosomeCount)]
    while remaining:
        for chromosome in chromosomes:
            # random.random() is always drawn to keep placement stochastic.
            if random.random() < 0.3 and remaining:
                promoter = remaining.pop(random.randrange(0, len(remaining)))
                chromosome.append(promoter)
                for _ in range(chrconf['GeneSize']):
                    chromosome.append(random.randrange(0, 33))
    return chromosomes
def initInd(Individual, PromoterMap, chrconf):
    """Build a fresh individual: random chromosomes plus its promoter map reference."""
    individual = Individual()
    individual[:] = initChromosomes(PromoterMap, chrconf)
    individual.PromoterMap = PromoterMap
    return individual
def generateUID():
    """Generate a random 6-character uppercase alphanumeric identifier.

    Bug fixes: `string` was never imported at module level, and `k=6` was
    passed to str.join instead of random.choices, so every call raised.
    """
    import string  # local import: this module does not import string at top level
    Chars = string.ascii_uppercase + string.digits
    return ''.join(random.choices(Chars, k=6))
def chromossomeCrossover(chr1, chr2):
    """Swap the tails of two chromosomes in place, starting at a random cut.

    When the chromosomes differ in length, the longer one's swap region is
    shifted by a random offset within the length difference, so any part of
    its tail can participate.

    Changes: removed the unused `top_bottom` local; an empty shorter
    chromosome is now a no-op instead of raising ValueError from
    randrange(0, 0).
    """
    if len(chr1) != len(chr2):
        len_diff = abs(len(chr1) - len(chr2))
    else:
        len_diff = 0
    minor = chr1 if len(chr1) < len(chr2) else chr2
    major = chr2 if len(chr1) < len(chr2) else chr1
    if not minor:
        return  # nothing to cross over with an empty chromosome
    offset = random.randrange(0, len_diff + 1)
    cut_point = random.randrange(0, len(minor))
    for k in range(cut_point, len(minor)):
        # Tuple assignment swaps without an explicit buffer variable.
        major[k + offset], minor[k] = minor[k], major[k + offset]
def pachytene(ind1, ind2):
    """Mate two individuals chromosome-by-chromosome via in-place tail swaps.

    Both individuals are deep-copied, their chromosomes sorted by length so
    positional pairs have comparable sizes, then each pair is crossed over.
    Returns the two mated individuals.

    Bug fix: when the individuals have a different number of chromosomes the
    originals are now returned unchanged — the previous bare ``return``
    yielded None, which poisoned offspring lists in callers that unpack the
    mate() result (e.g. deap.algorithms.varAnd). The unused `childChr`
    accumulator was also removed.
    """
    if len(ind1) != len(ind2):
        return ind1, ind2
    ind1 = deepcopy(ind1)
    ind2 = deepcopy(ind2)
    ind1[:] = sorted(ind1, key=len)
    ind2[:] = sorted(ind2, key=len)
    for W in range(len(ind1)):
        chromossomeCrossover(ind1[W], ind2[W])
    return ind1, ind2
def mutate(ind, mutpb=0.001, mutagg=12):
    """Point-mutate plain base values in place; promoter bases stay untouched.

    Each base mutates with probability `mutpb` by a delta in
    [-mutagg, mutagg).

    Bug fix: the promoter test compared the position index (``BP < 100``)
    instead of the base value at that position, so promoters — whose values
    live in [120, 240) while plain bases are < 33 — could be mutated and
    destroyed. The value itself is now checked.
    """
    for C in range(len(ind)):
        for BP in range(len(ind[C])):
            if ind[C][BP] < 100:  # case this is a common base value;
                if random.random() < mutpb:
                    ind[C][BP] += random.choice(range(-mutagg, mutagg))
            else:  # case this is in fact a promoter; never mutate it
                pass
    return ind,
def clone(Chr):  # !!review this
    """Duplicate a random segment of the chromosome, appending it in place.

    The previous implementation raised on every call: it sliced the builtin
    ``chr`` instead of the ``Chr`` argument and appended an undefined ``new``
    variable. This keeps the apparent intent — pick a random cut point,
    take the corresponding head/tail slice, and append it — but the exact
    intended growth rule should still be reviewed.
    """
    cut_point = random.randrange(-len(Chr), len(Chr))
    if not cut_point:
        cut_point = 1  # avoid an empty slice when the draw lands on 0
    if cut_point > 0:
        new_chr = Chr[:cut_point]
    else:
        new_chr = Chr[cut_point:]
    Chr += new_chr
================================================
FILE: promoterz/representation/deapCreator.py
================================================
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see .
"""The :mod:`~deap.creator` is a meta-factory allowing to create classes that
will fulfill the needs of your evolutionary algorithms. In effect, new
classes can be built from any imaginable type, from :class:`list` to
:class:`set`, :class:`dict`, :class:`~deap.gp.PrimitiveTree` and more,
providing the possibility to implement genetic algorithms, genetic
programming, evolution strategies, particle swarm optimizers, and many more.
"""
import array
import copy
import warnings
import copyreg as copy_reg
class_replacers = {}
"""Some classes in Python's standard library as well as third party library
may be in part incompatible with the logic used in DEAP. To palliate
this problem, the method :func:`create` uses the dictionary
`class_replacers` to identify if the base type provided is problematic, and if
so the new class inherits from the replacement class instead of the
original base class.
`class_replacers` keys are classes to be replaced and the values are the
replacing classes.
"""
# Optional numpy support: when numpy is importable and exposes ndarray/array,
# register a deepcopy- and pickle-safe ndarray subclass in class_replacers.
try:
    import numpy
    (numpy.ndarray, numpy.array)
except ImportError:
    # Numpy is not present, skip the definition of the replacement class.
    pass
except AttributeError:
    # Numpy is present, but there is either no ndarray or array in numpy,
    # also skip the definition of the replacement class.
    pass
else:
    class _numpy_array(numpy.ndarray):
        def __deepcopy__(self, memo):
            """Overrides the deepcopy from numpy.ndarray that does not copy
            the object's attributes. This one will deepcopy the array and its
            :attr:`__dict__` attribute.
            """
            copy_ = numpy.ndarray.copy(self)
            copy_.__dict__.update(copy.deepcopy(self.__dict__, memo))
            return copy_

        @staticmethod
        def __new__(cls, iterable):
            """Creates a new instance of a numpy.ndarray from a function call.
            Adds the possibility to instanciate from an iterable."""
            return numpy.array(list(iterable)).view(cls)

        def __setstate__(self, state):
            # Restore instance attributes when unpickling.
            self.__dict__.update(state)

        def __reduce__(self):
            # Pickle as (class, contents, instance attributes).
            return (self.__class__, (list(self),), self.__dict__)

    class_replacers[numpy.ndarray] = _numpy_array
class _array(array.array):
    # Replacement for array.array: DEAP needs deepcopy/pickle to preserve
    # instance attributes and the subclass type, which array.array does not.
    @staticmethod
    def __new__(cls, seq=()):
        # typecode is taken from the created subclass, not from the caller.
        return super(_array, cls).__new__(cls, cls.typecode, seq)

    def __deepcopy__(self, memo):
        """Overrides the deepcopy from array.array that does not copy
        the object's attributes and class type.
        """
        cls = self.__class__
        copy_ = cls.__new__(cls, self)
        memo[id(self)] = copy_
        copy_.__dict__.update(copy.deepcopy(self.__dict__, memo))
        return copy_

    def __reduce__(self):
        # Pickle as (class, contents, instance attributes).
        return (self.__class__, (list(self),), self.__dict__)

class_replacers[array.array] = _array
class CreatorMeta(type):
    # Metaclass backing create(): keyword attributes that are classes become
    # per-instance attributes (instantiated inside the generated __init__),
    # while plain values become class ("static") attributes.
    def __new__(meta, name, base, dct):
        # `base` is a single class here; wrap it into the bases tuple type() expects.
        return super(CreatorMeta, meta).__new__(meta, name, (base,), dct)

    def __init__(cls, name, base, dct):
        # A DeprecationWarning is raised when the object inherits from the
        # class "object" which leave the option of passing arguments, but
        # raise a warning stating that it will eventually stop permitting
        # this option. Usually this happens when the base class does not
        # override the __init__ method from object.
        # Partition the requested attributes: class objects -> instance
        # attributes, everything else -> class attributes.
        dict_inst = {}
        dict_cls = {}
        for obj_name, obj in dct.items():
            if isinstance(obj, type):
                dict_inst[obj_name] = obj
            else:
                dict_cls[obj_name] = obj

        def initType(self, *args, **kargs):
            """Replace the __init__ function of the new type, in order to
            add attributes that were defined with **kargs to the instance.
            """
            for obj_name, obj in dict_inst.items():
                setattr(self, obj_name, obj())
            if base.__init__ is not object.__init__:
                base.__init__(self, *args, **kargs)

        cls.__init__ = initType
        # reduce_args lets classes produced by this metaclass be rebuilt on unpickle.
        cls.reduce_args = (name, base, dct)
        super(CreatorMeta, cls).__init__(name, (base,), dict_cls)

    def __reduce__(cls):
        # Rebuild through meta_creator so the class is re-registered in this
        # module's globals when unpickled.
        return (meta_creator, cls.reduce_args)

# Register the custom __reduce__ so pickling classes made by CreatorMeta works.
copy_reg.pickle(CreatorMeta, CreatorMeta.__reduce__)
def meta_creator(name, base, dct):
    """Create a class via CreatorMeta and publish it in this module's globals.

    The globals() registration is what makes the class reachable as
    creator.<Name> and lets pickled instances find their class again.
    """
    newClass = CreatorMeta(name, base, dct)
    globals()[name] = newClass
    return newClass
def create(name, base, **kargs):
    """Creates a new class named *name* inheriting from *base* in the
    :mod:`~deap.creator` module. The new class can have attributes defined by
    the subsequent keyword arguments passed to the function create. If the
    argument is a class (without the parenthesis), the __init__ function is
    called in the initialization of an instance of the new object and the
    returned instance is added as an attribute of the class' instance.
    Otherwise, if the argument is not a class, (for example an :class:`int`),
    it is added as a "static" attribute of the class.
    :param name: The name of the class to create.
    :param base: A base class from which to inherit.
    :param attribute: One or more attributes to add on instanciation of this
    class, optional.
    The following is used to create a class :class:`Foo` inheriting from the
    standard :class:`list` and having an attribute :attr:`bar` being an empty
    dictionary and a static attribute :attr:`spam` initialized to 1. ::
    create("Foo", list, bar=dict, spam=1)
    This above line is exactly the same as defining in the :mod:`creator`
    module something like the following. ::
    class Foo(list):
    spam = 1
    def __init__(self):
    self.bar = dict()
    The :ref:`creating-types` tutorial gives more examples of the creator
    usage.
    """
    # Re-creating an existing name silently replaces the old class; warn so
    # accidental double-registration is visible.
    if name in globals():
        warnings.warn("A class named '{0}' has already been created and it "
                      "will be overwritten. Consider deleting previous "
                      "creation of that class or rename it.".format(name),
                      RuntimeWarning)
    # Check if the base class has to be replaced
    if base in class_replacers:
        base = class_replacers[base]
    meta_creator(name, base, kargs)
================================================
FILE: promoterz/representation/oldschool.py
================================================
#!/bin/python
import random
import json
import os
from copy import deepcopy
from .import Creator
from deap import base
from deap import tools
from . .import parameterOperations
def constructPhenotype(stratSettings, individue):
    """Map an individual's 0-100 gene values onto each parameter's (low, high) range.

    Parameters are taken in sorted-name order, so gene index K always pairs
    with the same parameter. The flat result is expanded into nested dicts.

    Cleanup: the unused ``Strategy = individue.Strategy`` local was removed;
    this also stops requiring a Strategy attribute the function never used.
    """
    # Scale a 0-100 gene value V linearly into the (low, high) limits.
    R = lambda V, lim: ((lim[1] - lim[0]) / 100) * V + lim[0]
    AttributeNames = sorted(list(stratSettings.keys()))
    Phenotype = {}
    for K in range(len(AttributeNames)):
        Value = R(individue[K], stratSettings[AttributeNames[K]])
        Phenotype[AttributeNames[K]] = Value
    # Expand dotted parameter names ('a.b') back into nested dicts.
    Phenotype = parameterOperations.expandNestedParameters(Phenotype)
    return Phenotype
def createRandomVarList(IndSize):
    """Return IndSize random gene values, each drawn uniformly from [0, 100)."""
    return [random.randrange(0, 100) for _ in range(IndSize)]
def initInd(Criterion, Attributes):
    """Instantiate Criterion and fill it with one random 0-100 gene per attribute."""
    individual = Criterion()
    # Inlined former createRandomVarList call: one gene per attribute key.
    individual[:] = [random.randrange(0, 100) for _ in range(len(list(Attributes.keys())))]
    return individual
def getToolbox(Strategy, genconf, Attributes):
    """Assemble the DEAP toolbox for the flat 0-100 gene-list representation."""
    toolbox = base.Toolbox()
    creator = Creator.init(base.Fitness, {'Strategy': Strategy})
    toolbox.register("newind", initInd, creator.Individual, Attributes)
    toolbox.register("population", tools.initRepeat, list, toolbox.newind)
    toolbox.register("mate", tools.cxTwoPoint)
    # NOTE(review): low=10, up=10 makes every mutated gene exactly 10 — genes
    # are drawn from [0, 100), so a wider range (e.g. low=0, up=100) looks
    # intended; confirm before changing.
    toolbox.register("mutate", tools.mutUniformInt, low=10, up=10, indpb=0.2)
    toolbox.register("constructPhenotype", constructPhenotype, Attributes)
    return toolbox
================================================
FILE: promoterz/sequence/__init__.py
================================================
#!/bin/python
from .locale import standard_loop
from .world import parallel_world
================================================
FILE: promoterz/sequence/locale/standard_loop.py
================================================
#!/bin/python
from deap import tools
from copy import deepcopy
import random
from deap import algorithms
from ... import statistics
from ... import evolutionHooks
from ... import validation
from ... import supplement
def checkPopulation(population, message):
    """Print a warning message when the population list is empty."""
    if len(population) == 0:
        print(message)
def execute(World, locale):
    """Run one full evolutionary epoch on a single locale.

    Pipeline: (re)populate -> validate phenotypes -> dedupe -> evaluate ->
    hall-of-fame -> stats -> aging -> fitness filters -> PRoFIGA population
    resize -> selection -> crossover/mutation -> immigration.
    Mutates `locale` (population, Dataset, extraStats, POP_SIZE) in place.
    """
    # --populate if we don't have population (migrations might do it);
    if not (locale.population):
        locale.population = locale.extratools.ImmigrateRandom(
            (5, 10),
            locale.population
        )
    locale.extraStats = {}
    # --validate individuals;
    locale.population = validation.validatePopulation(
        World.tools.constructPhenotype,
        World.TargetParameters,
        locale.population
    )
    # --remove equal citizens before evaluation for efficency
    nonevaluated = [ind for ind in locale.population if not ind.fitness.valid]
    Lu = len(nonevaluated)
    print("first unevaluated: %i" % len(nonevaluated))
    # populationPD culls phenotypically identical unevaluated individuals.
    remains = locale.extratools.populationPD(nonevaluated, 1.0)
    Lr = len(remains)
    print("%i individues removed due to equality" % (Lu - Lr))
    locale.population = [
        ind for ind in locale.population if ind.fitness.valid
    ] + remains
    # --load current dataset for locale;
    locale.Dataset = World.loadDatasetForLocalePosition(locale.position)
    # --evaluate individuals;
    locale.extraStats['nb_evaluated'], locale.extraStats[
        'avgTrades'
    ] = World.parallel.evaluatePopulation(
        locale
    )
    locale.extraStats['avgExposure'] = sum(
        [I.averageExposure
         for I in locale.population])/len(locale.population)
    # --send best individue to HallOfFame; (every 15th epoch, including 0)
    if not locale.EPOCH % 15:
        BestSetting = tools.selBest(locale.population, 1)[0]
        locale.HallOfFame.insert(BestSetting)
    # Every individual must carry a valid fitness from here on.
    assert (sum([x.fitness.valid for x in locale.population]) == len(locale.population))
    # --compile stats;
    World.EvaluationModule.compileStats(locale)
    # --population ages
    qpop = len(locale.population)
    locale.population = locale.extratools.populationAges(
        locale.population, locale.EvolutionStatistics[locale.EPOCH]
    )
    wpop = len(locale.population)
    locale.extraStats['nbElderDies'] = qpop - wpop
    # INDIVIDUE FITNESS ATTRIBUTES FILTERS;
    # --remove very inapt citizens
    if World.conf.generation.minimumProfitFilter is not None:
        locale.extratools.filterThreshold(World.conf.generation.minimumProfitFilter,
                                          World.conf.generation._lambda)
        checkPopulation(locale.population,
                        "Population dead after profit filter.")
    # --remove individuals below tradecount
    if World.conf.generation.TradeNumberFilterRange is not None:
        locale.extratools.filterTrades(World.conf.generation.TradeNumberFilterRange,
                                       World.conf.generation._lambda)
        checkPopulation(locale.population,
                        "Population dead after trading number filter.")
    # --remove individues based on average roundtripe exposure time;
    if World.conf.generation.averageExposureLengthFilterRange is not None:
        locale.extratools.filterExposure(
            World.conf.generation.averageExposureLengthFilterRange,
            World.conf.generation._lambda
        )
        checkPopulation(locale.population,
                        "Population dead after roundtrip exposure filter.")
    # If the filters killed everyone, start a fresh random population.
    if not locale.population:
        locale.population = World.tools.population(World.conf.generation.POP_SIZE)
        print("Repopulating... Aborting epoch.")
    # --show stats;
    World.EvaluationModule.showStatistics(locale)
    # --calculate new population size; (PRoFIGA grows/shrinks POP_SIZE by the
    # relative improvement of the best score; skipped on epoch 0, which has
    # no previous statistics to compare against)
    if locale.EPOCH:
        PRoFIGA = supplement.PRoFIGA.calculatePRoFIGA(
            World.conf.generation.PRoFIGA_beta,
            locale.EPOCH,
            World.conf.generation.NBEPOCH,
            locale.EvolutionStatistics[locale.EPOCH - 1],
            locale.EvolutionStatistics[locale.EPOCH],
        )
        locale.POP_SIZE += locale.POP_SIZE * PRoFIGA
        # put population size inside thresholds;
        minps = World.conf.generation.POP_SIZE // 2
        maxps = World.conf.generation.POP_SIZE * 3
        try:
            _POP_SIZE = max(min(locale.POP_SIZE, maxps), minps)
            locale.POP_SIZE = int(round(_POP_SIZE))
        except Exception as e:
            locale.POP_SIZE = 30
            M = "POP_SIZE PROFIGA ERROR;"
            print(M)
    # --filter best inds;
    locale.population[:] = evolutionHooks.selBest(locale.population,
                                                  locale.POP_SIZE)
    checkPopulation(locale.population,
                    "Population dead after selection of score filter.")
    assert (None not in locale.population)
    # --select best individues to procreate
    LAMBDA = max(World.conf.generation._lambda,
                 locale.POP_SIZE - len(locale.population))
    TournamentSize = max(2 * LAMBDA,
                         len(locale.population))
    offspring = evolutionHooks.Tournament(locale.population,
                                          LAMBDA,
                                          TournamentSize)
    offspring = [deepcopy(x) for x in offspring]  # is deepcopy necessary?
    # --modify and integrate offspring;
    offspring = algorithms.varAnd(
        offspring, World.tools, World.conf.generation.cxpb, World.conf.generation.mutpb
    )
    locale.extratools.ageZero(offspring)
    locale.population += offspring
    # --NOW DOESN'T MATTER IF SOME INDIVIDUE LACKS FITNESS VALUES;
    assert (None not in locale.population)
    # --immigrate individual from HallOfFame;
    if random.random() < 0.2:
        locale.population = locale.extratools.ImmigrateHoF(locale.population)
    # --immigrate random number of random individues;
    if random.random() < 0.5 or not locale.population:
        locale.population = locale.extratools.ImmigrateRandom(
            (2, 7),
            locale.population
        )
    assert (None not in locale.population)
================================================
FILE: promoterz/sequence/world/parallel_world.py
================================================
#!/bin/python
import random
import itertools
import math
import time
def execute(World):
    """Run one epoch of world-level events over all locales.

    Applies, in order: inter-locale migration (closer locales exchange
    migrants more often), random locale creation, random locale destruction,
    age-based locale destruction, and random locale walks.

    Cleanup: removed the unused ``S, D = False, False`` locals.
    """
    # --APPLY MIGRATION BETWEEN LOCALES;
    if len(World.locales):
        LocalePairs = itertools.combinations(World.locales, 2)
        for L in LocalePairs:
            distance = World.calculateDistance(L[0].position, L[1].position)
            distance_weight = distance / World.maxdistance
            # The closer the pair, the higher the chance of a mutual exchange.
            if random.random() > distance_weight:
                World.migration(L[0], L[1], (1, 7))
                World.migration(L[1], L[0], (1, 7))
    # --APPLY LOCALE CREATION;
    if random.random() < World.conf.generation.localeCreationChance / 100:
        World.generateLocale()
    # --APPLY RANDOMIC LOCALE DESTRUCTION;
    if random.random() < World.conf.generation.localeExplodeChance / 100:
        chosenLocale = random.choice(World.locales)
        World.explodeLocale(chosenLocale)
    # --APPLY EXPECTED LOCALE DESTRUCTION;
    for L in range(len(World.locales)):
        if World.locales[L].EPOCH > World.conf.generation.localeExpirationAge:
            if len(World.locales) > 2:
                World.explodeLocale(World.locales[L])
                # if two locales are destroyed @ same time, post-locale migrations
                # will be a mess
                break
    # --APPLY LOCALE WALKS;
    for L in range(len(World.locales)):
        if random.random() < World.conf.generation.localeWalkChance / 100:
            World.localeWalk(World.locales[L])
================================================
FILE: promoterz/statistics.py
================================================
#!/bin/python
import numpy as np
from deap import tools
def getStatisticsMeter():
    """Build a DEAP Statistics object tracking avg/std/min/max of the first fitness value."""
    meter = tools.Statistics(lambda ind: ind.fitness.values[0])
    for label, reducer in (("avg", np.mean), ("std", np.std),
                           ("min", np.min), ("max", np.max)):
        meter.register(label, reducer)
    return meter
================================================
FILE: promoterz/supplement/PRoFIGA.py
================================================
#!/bin/python
def calculatePRoFIGA(beta, EPOCH, NBEPOCH, oldstats, Stats):
    """PRoFIGA population-size growth factor.

    Scales the relative improvement of the population's max score by `beta`
    and by the number of epochs still to run.

    :param beta: growth aggressiveness coefficient.
    :param EPOCH: current epoch index.
    :param NBEPOCH: total number of planned epochs.
    :param oldstats: previous epoch statistics dict (requires 'max').
    :param Stats: current epoch statistics dict (requires 'max').
    :returns: fractional population-size change (may be negative).
    """
    remainingEPOCH_NB = NBEPOCH - EPOCH
    previousMax = oldstats['max']
    if previousMax == 0:
        # Guard: a previous best of exactly 0 used to raise
        # ZeroDivisionError; no relative improvement is computable.
        return 0.0
    return beta * remainingEPOCH_NB * (Stats['max'] - previousMax) / previousMax
================================================
FILE: promoterz/supplement/age.py
================================================
# 1/bin/python
from deap import base
def _maturePopulation(population):
for W in range(len(population)):
try:
assert (population[W].Age)
except:
population[W].Age = 0
population[W].Age += 1
def _checkRetirement(individue, statistics, ageBoundary):
# (Minetti, 2005)
indscore = individue.fitness.values[0]
N = (ageBoundary[1] - ageBoundary[0]) / 2
aptitude = indscore - statistics['avg']
if aptitude > 0:
ABC = sum(ageBoundary) / 2
RSB = statistics['max'] - statistics['avg']
else:
ABC = ageBoundary[0]
RSB = statistics['avg'] - statistics['min']
RSB = max(1, RSB)
survival = ABC + (N * aptitude / RSB)
# oldenough = individue.Age > ageBoundary[0]
#relativeAge = (individue.Age-ageBoundary[0]) / (ageBoundary[1]-ageBoundary[0])
retires = individue.Age - survival > ageBoundary[1]
# print(survival)
return retires
def _killElders(population, statistics, ageBoundary):
    """Return the population without individuals flagged for retirement.

    Retirees are first marked as None in place (preserving the original
    in-place marking behavior), then filtered out of the returned list.
    """
    for idx in range(len(population)):
        if _checkRetirement(population[idx], statistics, ageBoundary):
            population[idx] = None
    return [individue for individue in population if individue]
def ageZero(population):
    """Reset every individual's Age to 0 (used for freshly created offspring)."""
    for individue in population:
        individue.Age = 0
def populationAges(ageBoundary, population, averageScore):
    """Age the whole population by one epoch, then retire the too-old individuals.

    :param ageBoundary: (min, max) allowed age window.
    :param averageScore: epoch statistics dict used to bias retirement.
    :returns: the surviving population.
    """
    _maturePopulation(population)
    return _killElders(population, averageScore, ageBoundary)
================================================
FILE: promoterz/supplement/phenotypicDivergence.py
================================================
#!/bin/python
from deap import tools
from . .import parameterOperations
import random
def checkPhenotypicDivergence(constructPhenotype, indA, indB):
    """Count how many flattened phenotype parameters differ between two individuals.

    A score of 0 means the individuals are phenotypically identical.
    """
    flatA, flatB = [
        parameterOperations.flattenParameters(constructPhenotype(ind))
        for ind in (indA, indB)
    ]
    return sum(1 for key in flatA.keys() if flatA[key] != flatB[key])
def populationPhenotypicDivergence(constructPhenotype, population, delpercent):
    """Cull phenotypic duplicates: for every pair with identical phenotypes,
    the earlier individual is removed with probability `delpercent`.

    Marks victims as None in place, then returns the filtered population.
    """
    if len(population) > 1:
        for I in range(len(population) - 1):
            for J in range(I + 1, len(population)):
                if population[I]:
                    divergence = checkPhenotypicDivergence(
                        constructPhenotype, population[I], population[J]
                    )
                    # random.random() is only drawn when the pair is identical.
                    if not divergence and random.random() < delpercent:
                        population[I] = None
    return [individue for individue in population if individue]
================================================
FILE: promoterz/validation.py
================================================
#!/bin/python
from .parameterOperations import flattenParameters
def checkPhenotypeParameterIntegrity(TargetParameters, phenotype):
    """Return the first target parameter name missing from the phenotype, or None."""
    targetKeys = list(flattenParameters(TargetParameters).keys())
    phenotypeKeys = list(flattenParameters(phenotype).keys())
    for key in targetKeys:
        if key not in phenotypeKeys:
            return key
    return None
def checkPhenotypeAttributeRanges(TargetParameters, phenotype, tolerance=0.3):
    """Verify every phenotype value sits within its target range, widened by
    `tolerance` (fraction of each bound's magnitude).

    :returns: a "low name high" description string for the first out-of-range
        parameter, or None when every value is acceptable.
    """
    targetRanges = flattenParameters(TargetParameters)
    phenotypeValues = flattenParameters(phenotype)
    for K in targetRanges.keys():
        low, high = targetRanges[K][0], targetRanges[K][1]
        low_bound = low - (tolerance * abs(low))
        high_bound = high + (tolerance * abs(high))
        value = phenotypeValues[K]
        if value > high_bound or value < low_bound:
            return "%f %s %f" % (low_bound, K, high_bound)
    return None
def validatePopulation(IndividualToSettings, TargetParameters, population):
    """Destroy individuals whose phenotype misses parameters or breaks value ranges.

    Each individual is decoded with IndividualToSettings; invalid ones are
    marked None (with the offending phenotype printed) and filtered out of
    the returned population.
    """
    ErrMsg = "--destroying invalid citizen--\n\t({ErrType} {ErrParameter})\n"
    for p in range(len(population)):
        phenotype = IndividualToSettings(population[p])
        missing = checkPhenotypeParameterIntegrity(TargetParameters, phenotype)
        if missing:
            print(ErrMsg.format(ErrType='missing parameter', ErrParameter=missing))
            population[p] = None
            continue
        outOfRange = checkPhenotypeAttributeRanges(TargetParameters, phenotype)
        if outOfRange:
            print(ErrMsg.format(ErrType=' invalid values on', ErrParameter=outOfRange))
            population[p] = None
        if not population[p]:
            # Show the rejected phenotype to aid debugging.
            print(phenotype)
    return [individue for individue in population if individue]
================================================
FILE: promoterz/webServer/__init__.py
================================================
#!/bin/python
from .import core
================================================
FILE: promoterz/webServer/core.py
================================================
#!/bin/python
import os
import re
import datetime
import flask
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
from flask_caching import Cache
from evaluation.gekko.statistics import epochStatisticsNames, periodicStatisticsNames
from . import graphs
from . import layout
import functools
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
def build_server(webpageTitle):
    """Construct the Flask + Dash monitoring web application.

    Returns the (dash_app, flask_server) pair. Graph objects are attached to
    the app by the update functions in graphs.py and rendered by layout.py.
    """
    # Setup the app
    server = flask.Flask(__name__)
    app = dash.Dash(__name__, server=server, csrf_protect=False)
    app.scripts.config.serve_locally = False
    app.css.config.serve_locally = False
    app.webpageTitle = webpageTitle
    timeout = 60 * 60  # 1 hour (NOTE(review): currently unused)
    app.startTime = datetime.datetime.now()
    # Graph Update function bindings;
    app.updateLocaleGraph = graphs.updateLocaleGraph
    app.updateWorldGraph = graphs.updateWorldGraph
    app.updateEvalBreakGraph = graphs.updateEvalbreakGraph
    # Graphics initialization and input points against World;
    # why is this placeholder required? ;(
    app.WorldGraph = dcc.Graph(id='WorldGraph', figure={})
    app.LocaleGraphs = []
    app.EvalBreakGraph = []
    app.resultParameters = []
    app.epochInfo = ""
    # layout is a callable so Dash re-evaluates it (and the cached graphs)
    # on every page load instead of freezing the startup state.
    app.layout = functools.partial(layout.getLayout, app)
    app.config['suppress_callback_exceptions'] = False
    # event triggers
    onRefreshClick = Input('refresh-button', 'n_clicks')
    """
    # update graph methods
    @app.callback(Output('last-refresh', 'children'),
                  [Input('refresh-button', 'n_clicks')])
    def display_time(w):
        print("Refreshing graphical interface graphics.")
        return str(datetime.datetime.now())
    @app.callback(Output('WorldGraph', 'children'),
                  [Input('refresh-button', 'n_clicks')])
    def updateGGraphs(w):
        return [app.WorldGraph]
    @app.callback(Output('LocaleGraphs', 'children'),
                  [Input('refresh-button', 'n_clicks')])
    def updateLGraphs(w):
        return [app.GraphicList]
    """
    # SELECT PAGE;
    @app.callback(dash.dependencies.Output('page-content', 'children'),
                  [dash.dependencies.Input('url', 'pathname')])
    def display_page(pathname):
        # Route by substring match on the URL path.
        if re.findall("evalbreak", str(pathname)):
            return layout.getEvalbreak(app)
        if re.findall("results", str(pathname)):
            return layout.getResults(app)
        else:
            return layout.getCommon(app)

    # NOTE(review): the route has no '<path>' converter yet send_css takes a
    # `path` argument — requests to /static/ look like they would fail;
    # likely should be '/static/<path:path>'. Confirm before changing.
    @server.route('/static/')
    def send_css(path):
        return flask.send_from_directory(os.path.dirname(__file__), path)

    # load external css
    currentDirectory = os.path.dirname(os.path.abspath(__file__))
    externalCssListPath = os.path.join(currentDirectory,
                                       "external_css_list.txt")
    with open(externalCssListPath) as cssListFile:
        external_css = cssListFile.read().split("\n")
    external_css = list(filter(None, external_css))
    for css in external_css:
        app.css.append_css({"external_url": css})
    # launch DASH APP
    return app, server
================================================
FILE: promoterz/webServer/external_css_list.txt
================================================
https://fonts.googleapis.com/css?family=Overpass:400,400i,700,700i
https://cdn.jsdelivr.net/gh/plotly/dash-app-stylesheets@c6a126a684eaaa94a708d41d6ceb32b28ac78583/dash-technical-charting.css
================================================
FILE: promoterz/webServer/graphs.py
================================================
#!/bin/python
import dash_core_components as dcc
from evaluation.gekko.statistics import epochStatisticsNames, periodicStatisticsNames
def updateWorldGraph(app, WORLD):
    """Rebuild the 2D world-topology scatter graph (one square marker per locale).

    Cleanup: removed the unused `environmentData` list.
    """
    populationGroupData = [
        {
            'x': [locale.position[0]],
            'y': [locale.position[1]],
            'type': 'scatter',
            'name': locale.name,
            'showscale': False,
            'mode': 'markers',
            'marker': {
                'symbol': 'square'
            }
        } for locale in WORLD.locales
    ]
    fig = {
        'data': populationGroupData,
        'layout': {
            'title': "World Topology: 2D MAP"
        }
    }
    G = dcc.Graph(id="WorldGraph", figure=fig)
    # Stored on the app so layout callbacks can serve the latest figure.
    app.WorldGraph = G
    return G
def updateLocaleGraph(app, LOCALE):
    """Build or refresh the evolution-statistics line graph for one locale.

    Graphs are cached on app.LocaleGraphs: an existing graph with the same id
    is replaced in place; otherwise the new graph is appended.

    Cleanup: removed the unused `ID` local, the dead commented-out dateRange
    annotation block, and its only consumer `annotationFontDescription`.
    """
    GraphName = LOCALE.name
    print('Loading %s' % GraphName)
    Statistics = LOCALE.EvolutionStatistics
    # Annotations are currently always empty; kept so the layout key exists.
    annotations = []
    # Locate a previously rendered graph for this locale, if any.
    oldLocaleGraph = None
    for lidx, localeGraph in enumerate(app.LocaleGraphs):
        if localeGraph.id == LOCALE.name:
            oldLocaleGraph = lidx
            break
    statisticsNames = {}
    statisticsNames.update(epochStatisticsNames)
    # statisticsNames.update(periodicStatisticsNames)
    # One fixed color per plotted statistic, in statNames order.
    colorSequence = [
        (188, 189, 34),
        (100, 11, 182),
        (186, 3, 34),
        (45, 111, 45),
        (66, 128, 66),
        (128, 66, 66),
    ]
    statNames = [
        'avg', 'std', 'min',
        'max',
        #'evaluationScore',
        #'evaluationScoreOnSecondary'
    ]
    DATA = [
        {
            'x': [Statistic['id'] for Statistic in Statistics],
            'y': [Statistic[statNames[S]] for Statistic in Statistics],
            'type': 'line',
            'name': statisticsNames[statNames[S]],
            'line': {'color': 'rgb%s' % str(colorSequence[S])},
        }
        for S in range(len(statNames))
    ]
    fig = {
        'data': DATA,
        'layout': {
            'title': 'Evolution at %s' % GraphName,
            'annotations': annotations
        },
    }
    G = dcc.Graph(figure=fig, id=LOCALE.name)
    if oldLocaleGraph is not None:
        app.LocaleGraphs[oldLocaleGraph] = G
    else:
        app.LocaleGraphs.append(G)
    return G
def updateEvalbreakGraph(app, EvaluationSummary):
    """Plot primary/secondary evaluation-break scores across all breaks so far.

    Missing keys in a summary entry plot as gaps (None) so both series stay
    aligned on the x axis.
    """
    seriesKeys = ["evaluation", "secondary"]
    series = {key: [] for key in seriesKeys}
    for entry in EvaluationSummary:
        for key in seriesKeys:
            series[key].append(entry[key] if key in entry.keys() else None)
    DATA = [
        {
            'x': list(range(len(series[key]))),
            'y': series[key],
            'type': 'line',
            'name': key.upper()
        } for key in series.keys()
    ]
    figure = {
        'data': DATA,
        'layout': {
            'title': "Evaluation Breaks"
        }
    }
    G = dcc.Graph(figure=figure, id="EvaluationBreaksGraph")
    app.EvalBreakGraph = G
    return G
================================================
FILE: promoterz/webServer/layout.py
================================================
#!/bin/python
import dash_core_components as dcc
import dash_html_components as html
import datetime
# Shared inline-CSS applied to every top-level page container so all
# views get the same centered, fixed-width, light-grey look.
allStyle = {
    'width': '1100',
    'margin-left': 'auto',
    'margin-right': 'auto',
    'font-family': 'overpass',
    'background-color': '#F3F3F3'
}
def getLayout(app):
    """Root Dash layout: URL tracker, shared header, per-page content slot."""
    pageSkeleton = [
        dcc.Location(id='url', refresh=False),
        getHeader(app),
        html.Div(id='page-content'),
    ]
    return html.Div(pageSkeleton)
def getHeader(app):
    """Build the page header: refresh button, timing widgets and nav menu.

    BUGFIX: the original passed ``style=inlineBlock.update({...})`` to the
    Div constructors; ``dict.update`` returns ``None``, so every one of
    those widgets silently lost its intended inline-block/float styling
    (and the shared ``inlineBlock`` dict was mutated as a side effect).
    Merged copies are passed instead.
    """
    inlineBlock = {"display": "inline-block"}
    floatLeft = dict(inlineBlock, **{"float": "left"})
    floatRight = dict(inlineBlock, **{"float": "right"})
    headerWidgets = [
        html.Button("Refresh", id='refresh-button'),
        html.Div(
            [
                html.Div("Last refresh @ ", style=floatLeft),
                html.Div(datetime.datetime.now(),
                         id='last-refresh', className="showTime",
                         style=floatLeft),
                html.Div("%s Start time" % app.startTime,
                         id='start-time', className="showTime",
                         style=floatRight),
                html.Br(),
                html.Center([
                    html.Div(app.epochInfo, id="current-epoch")
                ])
            ], className="showTime")
    ]
    pageMenu = [
        html.A(html.Button("Evolution Statistics"), href="/"),
        html.A(html.Button("Evaluation Breaks"), href="/evalbreak"),
        html.A(html.Button("View Results"), href="/results")
        # html.Button("View Settings", className="unimplemented"),
        # html.Button("Inspect Population", className="unimplemented")
    ]
    # html.Link(rel='stylesheet', href='/static/promoterz_style.css'),
    header = html.Div(
        [
            html.H2(
                app.webpageTitle,
                style={'padding-top': '20', 'text-align': 'center'},
            ),
            html.Div(headerWidgets),
            html.Div(pageMenu),
        ],
        style=allStyle)
    return header
def getCommon(app):
    """Main page body: the world-level graph followed by all locale graphs."""
    sections = [
        html.Div(children=app.WorldGraph, id='WorldGraphContainer'),
        html.Div(children=app.LocaleGraphs, id='LocaleGraphsContainer'),
    ]
    return html.Div(sections, style=allStyle)
def getEvalbreak(app):
    """Evaluation-break page body: the cached evaluation-break graph."""
    container = html.Div(children=app.EvalBreakGraph, id='EvalBreakContainer')
    return html.Div([container], style=allStyle)
def getResults(app):
    """Render one read-only text area per stored (score, parameters) pair."""
    boxStyle = {'width': '525', 'height': '550'}
    boxes = []
    for r in app.resultParameters:
        boxes.append(
            html.Textarea(str(r[0]) + '\n' + str(r[1]), style=boxStyle)
        )
    return boxes
================================================
FILE: promoterz/webServer/promoterz_style.css
================================================
/* Greyed-out styling for menu buttons whose feature is not implemented yet. */
.unimplemented {
    background-color: #666;
}

/* Inline layout for the header timing widgets (refresh/start time). */
.showTime {
    display: inline-block;
}
================================================
FILE: promoterz/world.py
================================================
#!/bin/python
import random
import time
import math
from . import locale
class World():
    """Top-level container of the island-model genetic algorithm.

    Holds a set of Locales (demes) placed on a 2D map and drives the
    per-epoch cycle: running each locale, migration between locales,
    locale explosion/creation, and optional web-UI updates.
    """

    def __init__(
        self,
        GlobalTools=None,
        populationLoops=None,
        worldLoops=None,
        conf=None,
        TargetParameters=None,
        EnvironmentParameters=None,
        onInitLocale=None,
        web=None,
    ):
        self.tools = GlobalTools
        # main components
        self.populationLoops = populationLoops
        self.worldLoops = worldLoops
        # genetic algorithm status
        self.EPOCH = 0
        self.locales = []
        self.totalEvaluations = 0
        # genetic algorithm attributes
        self.size = [500, 500]  # world map dimensions
        self.maxdistance = self.calculateDistance([0, 0], self.size)
        self.TargetParameters = TargetParameters
        self.conf = conf
        # Temporary assignment of configs
        self.localeID = 1
        self.EnvironmentParameters = EnvironmentParameters
        self.onInitLocale = onInitLocale
        self.web = web

    def generateLocale(self):
        """Create a new Locale at a random map position and register it."""
        name = 'Locale%i' % (self.localeID)
        self.localeID += 1
        position = [random.randrange(0, self.size[x]) for x in range(2)]
        L = locale.Locale(self,
                          name,
                          position,
                          random.choice(self.populationLoops)
                          )
        self.locales.append(L)

    def migration(self, source, target, number_range):
        """Move a random number of individuals from source to target.

        Fitness values are deleted so migrants are re-evaluated in their
        new locale (assumes individuals carry a DEAP-style fitness —
        TODO confirm against the representation module).
        """
        number = random.randrange(*number_range)
        for W in range(number):
            if len(source.population):
                index = random.randrange(0, len(source.population))
                individual = source.population.pop(index)
                del individual.fitness.values
                target.population.append(individual)

    def explodeLocale(self, explLocale):
        """Dissolve a locale, spreading its population to the others.

        Each surviving locale receives a share of the exploded population
        proportional to its map distance from the explosion site; the
        exploded locale is then removed from the world.
        """
        if len(self.locales) < 2:
            return
        totaldistance = 0
        for T in self.locales:
            if explLocale == T:
                T.tempdist = 0
                continue
            distance = self.calculateDistance(
                explLocale.position, T.position)
            T.tempdist = distance
            totaldistance += distance
        for T in self.locales:
            fugitiveNumber = T.tempdist / totaldistance *\
                len(explLocale.population)
            T.fugitivenumber = int(round(fugitiveNumber))
        for T in self.locales:
            self.migration(explLocale, T,
                           (T.fugitivenumber, T.fugitivenumber + 1))
            del T.tempdist
            del T.fugitivenumber
        # BUGFIX: the original filtered with `x != locale`, comparing
        # against the imported `locale` *module* (always unequal), so the
        # exploded locale was never actually removed from the world.
        self.locales = [x for x in self.locales if x is not explLocale]

    def runEpoch(self):
        """Run one full epoch: all locales, world hooks, and UI updates."""
        epochHeader = "EPOCH %i/%i" % (
            self.EPOCH,
            self.conf.generation.NBEPOCH
        )
        print("\t====== %s ======" % epochHeader)
        epochStartTime = time.time()
        if self.web:
            self.epochInfo = epochHeader
            self.web.updateWorldGraph(app=self.web, WORLD=self)
        for LOCALE in self.locales:
            LOCALE.run()
            if self.web:
                self.web.updateLocaleGraph(app=self.web, LOCALE=LOCALE)
        # world-level hook (e.g. migration/explosion scheduling)
        self.worldLoops[0](self)
        self.EPOCH += 1
        epochRunTime = time.time() - epochStartTime
        print("Epoch runs in %.2f seconds;" % epochRunTime)
        if not self.EPOCH % 10:
            # NOTE(review): self.parallel is assigned outside __init__;
            # confirm it is set before the 10th epoch is reached.
            print("Backend power %s" % self.parallel.lasttimesperind)
        print("")

    @staticmethod
    def calculateDistance(point1, point2):
        """Euclidean distance between two 2D points."""
        x = abs(point1[0] - point2[0])
        y = abs(point1[1] - point2[1])
        D = math.sqrt(x ** 2 + y ** 2)
        return D

    def seedEnvironment(self):
        """Tile the map with a square grid of pre-built environments.

        The configured worldSeedSize is rounded to the nearest square
        number; each grid cell holds one environment from onInitLocale.
        """
        # round to nearest square number
        self.sectorSeedRoot = round(math.sqrt(self.conf.generation.worldSeedSize))
        self.environmentSectors = []
        for i in range(self.sectorSeedRoot):
            row = []
            for j in range(self.sectorSeedRoot):
                ENV = self.onInitLocale(self)
                row.append(ENV)
            self.environmentSectors.append(row)

    def loadDatasetForLocalePosition(self, position):
        """Map a locale's (x, y) position to its environment grid sector."""
        pos = [math.floor(P / self.size[p] * self.sectorSeedRoot)
               for p, P in enumerate(position)]
        return self.environmentSectors[pos[0]][pos[1]]

    def localeWalk(self, locale):
        """Randomly shift a locale's position, wrapping around map edges."""
        ammount = self.conf.generation.localeWalkDistance
        variation = [random.randrange(-ammount, ammount)
                     for i in range(2)]
        for i in range(2):
            locale.position[i] += variation[i]
            # put it inside boundaries,
            # make world appear rounded like our planet :3;
            # fix too low values
            while locale.position[i] < 0:
                locale.position[i] += self.size[i]
            # fix too high values
            locale.position[i] = locale.position[i] % self.size[i]
================================================
FILE: requirements.txt
================================================
ccxt==1.13.139
pandas_datareader==0.5.0
numpy==1.16.2
tulipy==0.2
pandas==0.18.1
deap==1.2.2
scipy==0.19.0
pytoml==0.1.16
Js2Py==0.59
Flask_Caching==1.4.0
Quandl==3.4.0
dash==0.39.0
dash-daq==0.1.0
Flask==1.0.2
requests>=2.20.0
bayesian_optimization==0.6.0
zipline==1.2.0
arch==4.3.1
names==0.3.0
matplotlib==2.2.2
python_dateutil==2.7.3
pytz==2018.5
scikit_learn==0.19.2
waitress
================================================
FILE: settings/_Global.toml
================================================
gekkoPath = '$HOME/gekko'
configFilename = 'example-config.js'
log_name = 'evolution_gen.csv'
# Hosts list of remote machines running gekko, to distribute evaluation load;
# option values: path to HOSTS file list OR False;
RemoteAWS = '../AmazonSetup/hosts'
# Your gekko local URL - CHECK THIS!
# gekko:3000 stands for the default url at docker-compose scheme of things.
GekkoURLs = ['http://localhost:3000', 'http://gekko:3000']
showFailedStrategies = true
================================================
FILE: settings/_backtest.toml
================================================
# show gekko verbose (strat info) - gekko must start with -d flag;
gekkoDebug = 0
# time window size on days of candlesticks for each evaluation
deltaDays = 90
# candle size for gekko backtest in minutes
candleSize = 10
# mode of profit interpretation = v1, v2 or v3.
# please check the first functions at evaluation.gekko.backtest
# to understand what is this. has big impact on evolutionary agenda.
interpreteBacktestProfit = 'v3'
# Number of candlestick data loaded simultaneously in each locale;
# slower EPOCHS theoretical better evolution;
# seems broken. values other than 1 makes evolution worse.
ParallelCandlestickDataset = 1
# number of parallel backtests running on gekko;
ParallelBacktests = 6
================================================
FILE: settings/_bayesian.toml
================================================
================================================
FILE: settings/_binance.toml
================================================
credentialsFilePath = ""
strategyRunTimePeriodHours = 12
strategySelectorSigma = 10
# following option points to binance asset/currency .json file located at
# 'exchanges' folder of gekko. selecting it up is optional.
binanceAssetCurrencyTargetFilePath = ""
================================================
FILE: settings/_dataset.toml
================================================
# span in days from the end of the dataset to the beginning. Or zero.
# (to restrain length);
dataset_span = 0
# span for evaluation dataset. same scheme.
eval_dataset_span = 0
# -- Gekko Dataset Settings
# in order to enable dataset selection, turn off autoselect entry;
[dataset_source]
autoselect = true
exchange = 'kraken'
currency = 'USD'
asset = 'LTC'
[dataset_source2]
autoselect = true
exchange = 'kraken'
currency = 'USD'
asset = 'LTC'
[eval_dataset_source]
autoselect = true
exchange = 'kraken'
currency = 'USD'
asset = 'LTC'
================================================
FILE: settings/_evalbreak.toml
================================================
# number of individuals selected by score on each evaluation break for each locale;
NBBESTINDS = 1
# number of individuals randomly selected on each evaluation break for each locale;
NBADDITIONALINDS = 4
# show current best settings on every X epochs. (or False)
evaluateSettingsPeriodically = 50
# number of evaluations on evaluation break, for each selected individual on locales;
proofSize = 12
================================================
FILE: settings/_generation.toml
================================================
# Verbose single evaluation results;
showIndividualEvaluationInfo = false
# if a parameter is set to a single value rather than tuple limits in the settings,
# expand it into a tuple using the chosen spread value (percent); value = 10 --spread=50--> value = (5, 15)
parameter_spread = 60
# Initial population size per locale
POP_SIZE = 50
# number of epochs to run
NBEPOCH = 3000
# number of locales on parallel GA;
NBLOCALE = 3
# -- Genetic Algorithm Parameters
cxpb = 0.8 # Probabilty of crossover
mutpb = 0.2# Probability of mutation;
_lambda = 14# size of offspring generated per epoch;
# weight of PRoFIGA calculations on variability of population size
PRoFIGA_beta = 0.005
ageBoundaries = [9, 19] # minimum age to die age when everyone dies (on EPOCHS)
# after this age in epochs the locale surely explodes, i.e. ends.
localeExpirationAge = 100
# chance on each epoch of a locale to finish [in percentage];
localeExplodeChance = 2
# chance on each epoch of a locale creation [in percentage];
localeCreationChance = 2
# chance on each epoch of a locale to change position [in percentage];
localeWalkChance = 40
# max distance a locale can walk across world map on each epoch;
localeWalkDistance = 5
# number of different candle date ranges to seed world;
# no area overlap, no empty spaces.
# this rounds to the nearest square number.
worldSeedSize = 40
# filter individuals for minimum profit (or set to None)
minimumProfitFilter = -15
# filter individuals for minimum trade count; [has heavy impact] (or set to None)
TradeNumberFilterRange = [6, 300]
# filter individuals with roundtrip duration outside this range of values in hours (or set to None)
averageExposureLengthFilterRange = [0, 300]
# until another time range in dataset is selected;
# chromosome settings are for -gc mode which uses another GA internal representation mode
# for parameter values of each individue;
# check promoterz/representation/chromosome.py to see how it works.
# both parameters interact with crossover probability AKA cxpb;
# practical effects on evolution are really unknown;
[chromosome]
# length of the representation for each parameter. larger sizes should maintain
# evolutionary dynamics for parameters with proportionally larger ranges;
GeneSize = 3
# number of parameters represented by each chromosome;
# this should maintain evolutionary dynamics for strategies with proportionally larger
# parameter counts;
Density = 2
# weights to score each individual self explanatory;
[weights]
profit = 1.0
sharpe = 0.1
================================================
FILE: stratego/README.md
================================================
### Usage
This is a submodule of japonicus. This takes care of on-the-fly strategy creation and management.
It's at a very beta stage, like the rest of this GA implementation.
### TODO:
The method is to just sum indicators, I.E to buy, all indicators should be above threshold.
That can work, but more complex interactions between indicators should be implemented.
### Disclaimer:
stratego? japonicus? promoterz? from which depth of hell do those names come? hehehe
================================================
FILE: stratego/__init__.py
================================================
#!/bin/python
# Re-export the strategy generator so `import stratego` exposes it directly.
from .import gekko_strategy
================================================
FILE: stratego/gekko_strategy.py
================================================
#!/bin/python
import os
import random
import hashlib
import re
from collections import OrderedDict
# from . import Settings
from .indicator_properties import *
# gekkoStratFolder = Settings('').Global['gekkoDir']+'/strategies/'
def simplifyIndicators(name):
    """Return the JS line aliasing `this.indicators.<name>` to an upper-case var.

    E.g. 'RSI' -> "var RSI = this.indicators.rsi;".
    (Converted from assigned lambdas to defs per PEP 8 / E731; call
    signatures are unchanged.)
    """
    return "var {I} = this.indicators.{i};".format(
        i=name.lower(), I=name.upper()
    )


def addIndicatorText(name):
    """Return the JS `addIndicator` call registering indicator `name`.

    The settings suffix comes from IndicatorProperties[name]['input']
    (e.g. '.depth' for LRC).
    """
    return "this.addIndicator('{i}', '{I}', this.settings.{I}{A});".format(
        i=name.lower(), I=name.upper(), A=IndicatorProperties[name]['input']
    )


def onlyLetters(message):
    """Strip every character that is not an ASCII letter from `message`."""
    return re.sub(r"[^A-Za-z]+", '', message)
class StrategyFileManager():
    # Creates and manages generated gekko strategy files ("japonicus" strategies).
    # Strategy files are assembled from a skeleton JS file whose indicator
    # slots (declared on a `//JAPONICUS:` marker line) are filled with
    # indicators chosen from an individual's phenotype.

    def __init__(self, gekkoPath, indicatorSettings):
        # Target folders inside the user's gekko installation.
        self.gekkoStratFolder = gekkoPath + '/strategies/japonicus/'
        self.gekkoIndicatorFolder = gekkoPath + '/strategies/indicators/'
        if not os.path.isdir(self.gekkoStratFolder):
            os.mkdir(self.gekkoStratFolder)
        # Keep only the indicators enabled in the user's settings.
        AllowedIndicators = list(IndicatorProperties.keys())
        AllowedIndicators = [
            ind for ind in AllowedIndicators if indicatorSettings[ind]['active']
        ]
        # NOTE(review): path is relative — assumes the process cwd is the
        # japonicus repository root; confirm against the launcher.
        baseContent = open('stratego/skeleton/ontrend.js').read()
        # Skeleton hash: its tail versions the generated strategy names.
        self.baseMD5 = hashlib.md5(baseContent.encode('utf-8')).hexdigest()
        self.sessionCreatedFiles = []
        # The first line containing //JAPONICUS holds the slot header.
        self.skeletonHeader = [
            l for l in baseContent.split('\n') if '//JAPONICUS' in l
        ][
            0
        ]
        self.skeletonHeader = self.interpreteSkeletonHeader(self.skeletonHeader)
        # Drop indicators whose implementation file is missing from gekko.
        for I in range(len(AllowedIndicators)):
            if not os.path.isfile(
                "%s%s.js" % (self.gekkoIndicatorFolder, AllowedIndicators[I])
            ):
                print("Indicator %s doesn't exist!" % AllowedIndicators[I])
                AllowedIndicators[I] = None
        self.AllowedIndicators = [x for x in AllowedIndicators if x]
        if not self.AllowedIndicators:
            exit("No usable indicators detected.")

    def selectIndicator(self, chosenIndicators, phenotype, Type):
        # Pick one not-yet-chosen indicator of group `Type` from the phenotype.
        # Candidates are ranked by their 'active' gene (highest first);
        # raises RuntimeError when every candidate is already taken.
        indicatorsOnPhenotype = [
            ind for ind in phenotype.keys() if ind in IndicatorProperties.keys()
        ]
        allOfType = [
            ind
            for ind in indicatorsOnPhenotype
            if IndicatorProperties[ind]['group'] == Type
        ]
        Indicators = sorted(
            allOfType, key= lambda ind: phenotype[ind]['active'], reverse=True
        )
        chosenIndicatorNames = [
            chosenIndicators[name] for name in chosenIndicators.keys()
        ]
        for Ind in Indicators:
            if Ind not in chosenIndicatorNames:
                return Ind
        raise RuntimeError("not enough indicators for strategy %s;" % Indicators)

    def checkStrategy(self, phenotype):
        # Ensure a strategy file exists for this phenotype and return its
        # gekko strategy name ('japonicus/<file>'); writes the file on first use.
        AllIndicators = self.AllowedIndicators
        Indicators = {}
        # Fill every skeleton slot with an indicator of the requested group.
        for indicatorInternalName in self.skeletonHeader.keys():
            selectedIndicatorType = self.skeletonHeader[indicatorInternalName]
            selectedIndicator = self.selectIndicator(
                Indicators, phenotype, selectedIndicatorType
            )
            Indicators.update({indicatorInternalName: selectedIndicator})

        def sortIndicators(ind):
            # Rank fallback indicators by their 'active' gene when present.
            if ind in phenotype.keys():
                return phenotype[ind]['active']
            else:
                return 0
        FallbackIndicators = [x for x in AllIndicators if x in phenotype.keys()]
        if not Indicators:
            # NOTE(review): this fallback produces a *list*, but the code
            # below indexes Indicators as a dict — confirm this branch is
            # reachable only when the skeleton header is empty.
            Indicators = sorted(FallbackIndicators, key=sortIndicators, reverse=True)[
                0:2
            ]
        if not Indicators:
            exit("NO INDICATORS")
        IndicatorNames = [Indicators[slot] for slot in Indicators.keys()]
        # Filename: 'j' + skeleton-hash tail + concatenated indicator names.
        StrategyFileName = 'j' + self.baseMD5[-4:] + ''.join(IndicatorNames)
        stratpath = self.gekkoStratFolder + StrategyFileName + '.js'
        if not os.path.isfile(stratpath):
            print(self.sessionCreatedFiles)
            self.createStrategyFile(Indicators, stratpath)
        return 'japonicus/' + StrategyFileName

    def interpreteSkeletonHeader(self, header):
        # Parse the //JAPONICUS marker line into an ordered mapping of
        # {slotToken: indicatorGroup}, e.g. 'BULLMOM|MOMENTUM' ->
        # {'BULLMOM': 'momentum'}.
        Header = OrderedDict()
        header = header.replace('//JAPONICUS:', '')
        for segment in header.split(','):
            if '|' in segment:
                segment = segment.strip(' ').split('|')
                print(segment)
                Header[segment[0]] = onlyLetters(segment[1].lower())
        return Header

    def createStrategyFile(self, Indicators, stratpath):
        # Write a strategy by substituting each slot token in the ontrend
        # skeleton with its chosen indicator name.
        BASE = open("stratego/skeleton/ontrend.js").read()
        for Indicator in Indicators.keys():
            BASE = BASE.replace(Indicator, Indicators[Indicator])
        FILE = open(stratpath, 'w')
        FILE.write(BASE)
        print("Creating strategy %s file." % stratpath)
        self.sessionCreatedFiles.append(stratpath)
        FILE.close()

    def _createStrategyFile(self, Indicators, stratpath):
        # Alternative (currently unused) generator based on the dumbsum
        # skeleton: each indicator contributes one buy and one sell
        # condition, injected at the marker comments.
        BASE = open("stratego/skeleton/dumbsum.js").read()
        InitIndicators = [addIndicatorText(ind) for ind in Indicators]
        BASE = BASE.replace("//ADD_INDICATORS;", ('\n'.join(InitIndicators)))
        SimplifyIndicators = [simplifyIndicators(ind) for ind in Indicators]
        BASE = BASE.replace("//SIMPLIFY_INDICATORS;", ('\n'.join(SimplifyIndicators)))
        BuyConditions = []
        SellConditions = []
        for ind in Indicators:
            Bc = "%s.%s %s" % (
                ind,
                IndicatorProperties[ind]['attrname'],
                IndicatorProperties[ind]['result'][0].format(i=ind),
            )
            Sc = "%s.%s %s" % (
                ind,
                IndicatorProperties[ind]['attrname'],
                IndicatorProperties[ind]['result'][1].format(i=ind),
            )
            BuyConditions.append(Bc)
            SellConditions.append(Sc)
        BASE = BASE.replace(
            "//BUYCONDITIONS;", "var BuyConditions = [%s];" % ', '.join(BuyConditions)
        )
        BASE = BASE.replace(
            "//SELLCONDITIONS;",
            "var SellConditions = [%s];" % ', '.join(SellConditions),
        )
        FILE = open(stratpath, 'w')
        FILE.write(BASE)
        print("Creating strategy %s file." % stratpath)
        self.sessionCreatedFiles.append(stratpath)
        FILE.close()
================================================
FILE: stratego/indicator_properties.py
================================================
#!/bin/python
# Comparison templates shared by several indicators: buy when the
# indicator value is above its configured 'up' threshold, sell when
# below 'down'.
stdResult = ["> this.settings.{i}.thresholds.up", "< this.settings.{i}.thresholds.down"]
# Trend indicators compare directly against the candle close price.
againstPrice = ["> price", "< price"]
# Swap the [buy, sell] pair (used for oscillators such as RSI).
Reverse = lambda x: [x[1], x[0]]
# Per-indicator metadata:
#   input    - extra settings suffix appended to the addIndicator call;
#   attrname - attribute on the gekko indicator object holding its value;
#   result   - [buy, sell] comparison templates;
#   group    - indicator family used to fill skeleton slots.
IndicatorProperties = {
    "ADX": {
        "input": '', "attrname": "result", "result": stdResult, "group": "momentum"
    },
    "ATR": {
        "input": '', "attrname": "result", "result": stdResult, "group": "volatility"
    },
    "PPO": {
        "input": '', "attrname": "PPOhist", "result": stdResult, "group": "momentum"
    },
    "DEMA": {"attrname": "result", "result": stdResult, "input": '', "group": "trend"},
    "RSI": {
        "result": Reverse(stdResult),
        "input": '',
        "attrname": "result",
        "group": "momentum",
    },
    "TSI": {
        "input": '', "result": stdResult, "attrname": "result", "group": "momentum"
    },
    "LRC": {
        "result": againstPrice,
        "attrname": "result",
        "input": '.depth',
        "group": "trend",
    },
    "SMMA": {
        "input": '', "attrname": 'result', "result": stdResult, "group": "overlap"
    },
    "CCI": {
        "input": '', "result": stdResult, "attrname": 'result', "group": "momentum"
    },
}
================================================
FILE: stratego/skeleton/dumbsum.js
================================================
// helpers
var _ = require('lodash');
var log = require('../../core/log.js');

// Skeleton strategy: each indicator contributes one buy and one sell
// condition; a trade is advised only when more than 60% of the
// conditions agree (see validation()).  The //ADD_INDICATORS;,
// //SIMPLIFY_INDICATORS;, //BUYCONDITIONS; and //SELLCONDITIONS;
// marker comments below are string-replaced by stratego when a
// strategy file is generated — do not edit them.

// let's create our own method
var method = {};

method.init = function() {
  this.age = 0;
  this.currentTrend;
  this.requiredHistory = 16;
  this.persistence=0;
  //ADD_INDICATORS;
  // NOTE(review): lowercase `addindicator` and the '..INDA..' placeholder
  // look like leftover template text (gekko's API is `addIndicator`);
  // confirm whether this line is meant to be replaced or removed.
  this.addindicator('inda', '..INDA..', this.settings['..INDA..'])
}

// what happens on every new candle?
method.update = function(candle) {
}

method.log = function() {
}

// Fraction of conditions in ConditionList that are truthy (0..1).
method.validation = function(ConditionList)
{
  var validNB = ConditionList.filter(function(s) { return s; }).length;
  return validNB/ ConditionList.length;
}

// Only emit advice once the trend has persisted long enough.
method.checkPersistence = function(candidateAdvice)
{
  if (this.persistence >= this.settings.persistence)
    this.advice(candidateAdvice);
  else
    this.advice();
}

method.check = function(candle) {
  var price = candle.close;
  //SIMPLIFY_INDICATORS;
  //BUYCONDITIONS;
  //SELLCONDITIONS;
  this.age++;
  if (this.validation(BuyConditions) > 0.6)
  {
    if(this.currentTrend !== 'up') {
      this.currentTrend = 'up';
      this.advice();
      this.persistence=0;
    } else{
      this.persistence++;
      this.checkPersistence('long');
    }
  }
  else if (this.validation(SellConditions) > 0.6)
  {
    if (this.currentTrend !== 'down') {
      this.currentTrend = 'down';
      this.advice();
      this.persistence=0;
    } else{
      this.persistence++;
      this.checkPersistence('short');
    }
  } else {
    this.advice();
  }
}

module.exports = method;
================================================
FILE: stratego/skeleton/ontrend.js
================================================
/*
  skeleton adapted from former strategy:
  RSI Bull and Bear + ADX modifier
  1. Use different RSI-strategies depending on a longer trend
  2. But modify this slightly if shorter BULL/BEAR is detected
  -
  12 feb 2017
  -
  (CC-BY-SA 4.0) Tommie Hansen
  https://creativecommons.org/licenses/by-sa/4.0/
*/

// req's
var log = require ('../../core/log.js');
var config = require ('../../core/util.js').getConfig();

// NOTE: the //JAPONICUS: line inside init() is the slot header parsed by
// stratego.StrategyFileManager; the BULLMOM/BEARMOM/SECMOM tokens in this
// file are string-replaced with concrete indicator names when a strategy
// file is generated — do not edit those tokens.

// strategy
var strat = {

  /* INIT */
  init: function()
  {
    this.name = 'RSI Bull and Bear ADX';
    this.requiredHistory = 10//config.tradingAdvisor.historySize;
    this.resetTrend();
    // debug? set to false to disable all logging/messages/stats (improves performance)
    this.debug = true;
    // performance
    //config.backtest.batchSize = 1000; // increase performance
    //config.silent = true;
    //config.debug = false;
    //JAPONICUS:BULLMOM|MOMENTUM,BEARMOM|MOMENTUM,SECMOM|MOMENTUM;
    // SMA
    this.addIndicator('maSlow', 'SMA', this.settings.SMA_long );
    this.addIndicator('maFast', 'SMA', this.settings.SMA_short );
    // RSI
    this.addIndicator('BULL_momentum', 'BULLMOM', this.settings['BULLMOM'] );
    this.addIndicator('BEAR_momentum', 'BEARMOM', this.settings['BEARMOM'] );
    // ADX
    this.addIndicator('secondary_momentum', 'SECMOM', this.settings['SECMOM'] )
    // debug stuff
    this.startTime = new Date();
    // add min/max if debug
    if( this.debug ){
      this.stat = {
        adx: { min: 1000, max: 0 },
        bear: { min: 1000, max: 0 },
        bull: { min: 1000, max: 0 }
      };
    }
  }, // init()

  /* RESET TREND */
  resetTrend: function()
  {
    var trend = {
      duration: 0,
      direction: 'none',
      longPos: false,
    };
    this.trend = trend;
  },

  /* CHECK */
  check: function()
  {
    // get all indicators
    let ind = this.indicators,
      maSlow = ind.maSlow.result,
      maFast = ind.maFast.result,
      sec = this.indicators.secondary_momentum.result;

    // BEAR TREND
    if( maFast < maSlow )
    {
      var momentum = ind.BEAR_momentum.result;
      let momentum_hi = this.settings['BEARMOM'].thresholds.up,
        momentum_low = this.settings['BEARMOM'].thresholds.down;
      // ADX trend strength?
      if( sec > this.settings['SECMOM'].thresholds.up ) momentum_hi = momentum_hi + 15;
      else if( sec < this.settings['SECMOM'].thresholds.down ) momentum_low = momentum_low -5;
      if( momentum > momentum_hi ) this.short();
      else if( momentum < momentum_low ) this.long();
    }

    // BULL TREND
    else
    {
      var momentum = ind.BULL_momentum.result;
      let momentum_hi = this.settings['BULLMOM'].thresholds.up,
        momentum_low = this.settings['BULLMOM'].thresholds.down;
      // ADX trend strength?
      if( sec > this.settings['SECMOM'].thresholds.up ) momentum_hi = momentum_hi + 5;
      else if( sec < this.settings['SECMOM'].thresholds.down ) momentum_low = momentum_low -5;
      if( momentum > momentum_hi ) this.short();
      else if( momentum < momentum_low ) this.long();
    }
    // add adx low/high if debug
  }, // check()

  /* LONG */
  long: function()
  {
    if( this.trend.direction !== 'up' ) // new trend? (only act on new trends)
    {
      this.resetTrend();
      this.trend.direction = 'up';
      this.advice('long');
      if( this.debug ) log.info('Going long');
    }
    if( this.debug )
    {
      this.trend.duration++;
      log.info('Long since', this.trend.duration, 'candle(s)');
    }
  },

  /* SHORT */
  short: function()
  {
    // new trend? (else do things)
    if( this.trend.direction !== 'down' )
    {
      this.resetTrend();
      this.trend.direction = 'down';
      this.advice('short');
      if( this.debug ) log.info('Going short');
    }
    if( this.debug )
    {
      this.trend.duration++;
      log.info('Short since', this.trend.duration, 'candle(s)');
    }
  },

  /* END backtest */
  end: function()
  {
    let seconds = ((new Date()- this.startTime)/1000),
      minutes = seconds/60,
      str;
    minutes < 1 ? str = seconds.toFixed(2) + ' seconds' : str = minutes.toFixed(2) + ' minutes';
    log.info('====================================');
    log.info('Finished in ' + str);
    log.info('====================================');
    // print stats and messages if debug
    if(this.debug)
    {
      let stat = this.stat;
      log.info('BEAR RSI low/high: ' + stat.bear.min + ' / ' + stat.bear.max);
      log.info('BULL RSI low/high: ' + stat.bull.min + ' / ' + stat.bull.max);
      log.info('ADX min/max: ' + stat.adx.min + ' / ' + stat.adx.max);
    }
  }

};

module.exports = strat;
================================================
FILE: strategy_parameters/BBRSI.toml
================================================
interval = 14
[thresholds]
low = 40
high = 40
persistence = 9
[bbands]
TimePeriod = 20
NbDevUp = 0.7
NbDevDn = 0.7
================================================
FILE: strategy_parameters/DUAL_RSI_BULL_BEAR.toml
================================================
[RBB1]
SMA_long = 1000
SMA_short = 50
#BULL
BULL_RSI = 10
BULL_RSI_high = 80
BULL_RSI_low = 60
#BEAR
BEAR_RSI = 15
BEAR_RSI_high = 60
BEAR_RSI_low = 20
#ADX
ADX = 3
ADX_high = 70
ADX_low = 50
[RBB2]
SMA_long = 1000
SMA_short = 50
#BULL
BULL_RSI = 10
BULL_RSI_high = 80
BULL_RSI_low = 60
#BEAR
BEAR_RSI = 15
BEAR_RSI_high = 60
BEAR_RSI_low = 20
#ADX
ADX = 3
ADX_high = 70
ADX_low = 50
================================================
FILE: strategy_parameters/HL_TS.toml
================================================
# Minimum volume needed to enter a trade
Min24hUSDVolume = 500000
RollingVolumeHours = 24
CurrencyPrice = 9000
CandleSize = 5
# Do not enter in bearish trend
# if Med>Slow (bullish)
# else if Fast>Med (slightly bullish in bearish trend)
SMA_Fast = 50
SMA_Medium = 200
SMA_Slow = 500
DarvasPeriodSize = 24
NoTradeResetPeriod = 3
StopLossPercent = -5
MaxLongPositionHours = 12
[psar]
optInStart = 0.0
optInAcceleration = 0.25
optInMaximum = 0.5
================================================
FILE: strategy_parameters/NEO.toml
================================================
# Source: https://raw.githubusercontent.com/gcobs0834/gekko/develop/config/strategies/NEO.toml
# SMA Trends
SMA_long = 150
SMA_short = 40
# BULL
BULL_RSI = 10
BULL_RSI_high = 80
BULL_RSI_low = 50
# IDLE
IDLE_RSI = 12
IDLE_RSI_high = 65
IDLE_RSI_low = 39
# BEAR
BEAR_RSI = 15
BEAR_RSI_high = 50
BEAR_RSI_low = 25
# ROC
ROC = 6
ROC_lvl = 0
# BULL/BEAR is defined by the longer SMA trends
# if SHORT over LONG = BULL
# if SHORT under LONG = BEAR
# ROC is the LENGTH (averaging)
# Leave ROC_lvl at 0 otherwise Results are negative
================================================
FILE: strategy_parameters/NEObigjap.toml
================================================
# SETTINGS FOUND FOR NEO STRAT AT EPOCH 15.000;
# INTERNAL BACKTESTS RETURNED GREAT SCORE;
BEAR_RSI_low = 23.106060606060606
IDLE_RSI_high = 59.68181818181818
BEAR_RSI = 11.863636363636363
BULL_RSI_low = 47.121212121212125
SMA_short = 36.0
IDLE_RSI = 11.527272727272727
ROC = 4.781818181818182
IDLE_RSI_low = 34.154545454545456
BULL_RSI_high = 84.12121212121212
ROC_lvl = 0.0
BULL_RSI = 9.121212121212121
BEAR_RSI_high = 42.878787878787875
SMA_long = 145.0
================================================
FILE: strategy_parameters/PPO.toml
================================================
short = 12
long = 26
signal = 9
[thresholds]
down = -0.025
up = 0.025
persistence = 2
================================================
FILE: strategy_parameters/RBB_ADX2_BB.toml
================================================
[ADX]
adx = 3.0
high = 50
[BBands]
NbDevDn = 2.0
NbDevUp = 2.0
TimePeriod = 20.0
[BBtrend]
bearPersistence = 16
bullPersistence = 11
lowerThreshold = 50
upperThreshold = 86
[BEAR]
high = 60.0
low = 29.4
mod_high = 1.5
mod_low = -1.5
rsi = 9.2
[BULL]
high = 90
low = 37
mod_high = 6
mod_low = -13.5
rsi = 13.2
[SMA]
long = 1000.0
short = 50.0
================================================
FILE: strategy_parameters/RSI_BULL_BEAR.toml
================================================
SMA_long = [800, 1000]
SMA_short = 50
BULL_RSI = 10
BULL_RSI_high = 80
BULL_RSI_low = 60
BEAR_RSI = 15
BEAR_RSI_high = 60
BEAR_RSI_low = 20
ADX = 3
ADX_high = 70
ADX_low = 50
================================================
FILE: strategy_parameters/RSI_BULL_BEAR_ADX.toml
================================================
SMA_long = 1000
SMA_short = 50
BULL_RSI = 10
BULL_RSI_high = 80
BULL_RSI_low = 60
BEAR_RSI = 15
BEAR_RSI_high = 60
BEAR_RSI_low = 20
ADX = 3
ADX_high = 70
ADX_low = 50
short = 10
low = 80
mod = 20
================================================
FILE: strategy_parameters/RSI_BULL_BEAR_x2.toml
================================================
# MAJOR SMA TRENDS
MAJOR_SMA_long = 2000
MAJOR_SMA_short = 500
# MAJOR BULL TREND
# SMA Trends
BULL__SMA_long = 1000
BULL__SMA_short = 50
# BULL
BULL__BULL_RSI = 10
BULL__BULL_RSI_high = 80
BULL__BULL_RSI_low = 60
# BEAR
BULL__BEAR_RSI = 15
BULL__BEAR_RSI_high = 50
BULL__BEAR_RSI_low = 20
# MAJOR BEAR TREND
# SMA Trends
BEAR__SMA_long = 1000
BEAR__SMA_short = 50
# BULL
BEAR__BULL_RSI = 10
BEAR__BULL_RSI_high = 80
BEAR__BULL_RSI_low = 60
# BEAR
BEAR__BEAR_RSI = 15
BEAR__BEAR_RSI_high = 50
BEAR__BEAR_RSI_low = 20
================================================
FILE: strategy_parameters/WRSI_BULL_BEAR.toml
================================================
SMA_long = 1000
SMA_short = 50
BULL_RSI = 10
BULL_RSI_high = 80
BULL_RSI_low = 60
BEAR_RSI = 15
BEAR_RSI_high = 60
BEAR_RSI_low = 20
ADX = 3
ADX_high = 70
ADX_low = 50
================================================
FILE: strategy_parameters/foxhole.toml
================================================
P0 = [-65.536, 65.536]
P1 = [-65.536, 65.536]
================================================
FILE: strategy_parameters/griewangk.toml
================================================
P0 = [-600, 600]
P1 = [-600, 600]
P2 = [-600, 600]
P3 = [-600, 600]
P4 = [-600, 600]
P5 = [-600, 600]
P6 = [-600, 600]
P7 = [-600, 600]
P8 = [-600, 600]
P9 = [-600, 600]
================================================
FILE: strategy_parameters/quartic.toml
================================================
P0 = [-1.28, 1.28]
P1 = [-1.28, 1.28]
P2 = [-1.28, 1.28]
P3 = [-1.28, 1.28]
P4 = [-1.28, 1.28]
P5 = [-1.28, 1.28]
P6 = [-1.28, 1.28]
P7 = [-1.28, 1.28]
P8 = [-1.28, 1.28]
P9 = [-1.28, 1.28]
P10 = [-1.28, 1.28]
P11 = [-1.28, 1.28]
P12 = [-1.28, 1.28]
P13 = [-1.28, 1.28]
P14 = [-1.28, 1.28]
P15 = [-1.28, 1.28]
P16 = [-1.28, 1.28]
P17 = [-1.28, 1.28]
P18 = [-1.28, 1.28]
P19 = [-1.28, 1.28]
P20 = [-1.28, 1.28]
P21 = [-1.28, 1.28]
P22 = [-1.28, 1.28]
P23 = [-1.28, 1.28]
P24 = [-1.28, 1.28]
P25 = [-1.28, 1.28]
P26 = [-1.28, 1.28]
P27 = [-1.28, 1.28]
P28 = [-1.28, 1.28]
P29 = [-1.28, 1.28]
================================================
FILE: strategy_parameters/rastrigin.toml
================================================
P0 = [-5.12, 5.12]
P1 = [-5.12, 5.12]
P2 = [-5.12, 5.12]
P3 = [-5.12, 5.12]
P4 = [-5.12, 5.12]
P5 = [-5.12, 5.12]
P6 = [-5.12, 5.12]
P7 = [-5.12, 5.12]
P8 = [-5.12, 5.12]
P9 = [-5.12, 5.12]
P10 = [-5.12, 5.12]
P11 = [-5.12, 5.12]
P12 = [-5.12, 5.12]
P13 = [-5.12, 5.12]
P14 = [-5.12, 5.12]
P15 = [-5.12, 5.12]
P16 = [-5.12, 5.12]
P17 = [-5.12, 5.12]
P18 = [-5.12, 5.12]
P19 = [-5.12, 5.12]
================================================
FILE: strategy_parameters/rosenbrock.toml
================================================
P0 = [-2.048, 2.048]
P1 = [-2.048, 2.048]
================================================
FILE: strategy_parameters/scalperNEO.toml
================================================
# SMA Trends
SMA_long = 150
SMA_short = 40
# BULL
BULL_RSI = 10
BULL_RSI_high = 80
BULL_RSI_low = 50
# IDLE
IDLE_RSI = 12
IDLE_RSI_high = 65
IDLE_RSI_low = 39
# BEAR
BEAR_RSI = 15
BEAR_RSI_high = 50
BEAR_RSI_low = 25
# ROC
ROC = 6
ROC_lvl = 0
scalperDelay = 7
scalperThresholdPercent = 2
================================================
FILE: strategy_parameters/scalperRBBA.toml
================================================
SMA_long = 1000
SMA_short = 50
BULL_RSI = 10
BULL_RSI_high = 80
BULL_RSI_low = 60
BEAR_RSI = 15
BEAR_RSI_high = 60
BEAR_RSI_low = 20
ADX = 3
ADX_high = 70
ADX_low = 50
scalperDelay = 7
scalperThresholdPercent = 2
================================================
FILE: strategy_parameters/schwefel.toml
================================================
P0 = [-500, 500]
P1 = [-500, 500]
P2 = [-500, 500]
P3 = [-500, 500]
P4 = [-500, 500]
P5 = [-500, 500]
P6 = [-500, 500]
P7 = [-500, 500]
P8 = [-500, 500]
P9 = [-500, 500]
================================================
FILE: utilities/importer.sh
================================================
#!/bin/bash
# To run GAs one needs candlestick datasets to backtest.
# Grabbing that data on a VPS can be a pain, so that's an automated tool that grabs some interesting datasets;
GekkoPath="${HOME}/gekko"
japonicusRelativeToGekko="../japonicus"
# Collect every gekko importer config (*.js) in the current directory.
# NOTE(review): this assumes the script is executed from inside the
# utilities/ folder (the node invocation re-prefixes utilities/) —
# confirm the expected working directory.
configs=($(ls|grep ".js"))
echo $configs
# Run gekko's importer (-i) once per config file.
for conf in "${configs[@]}"
do
	node ${GekkoPath}/gekko.js -i -c ${japonicusRelativeToGekko}/utilities/${conf}
done
================================================
FILE: utilities/poloUSDTBTC.js
================================================
// Gekko importer configuration. Everything is explained here:
// @link https://gekko.wizb.it/docs/commandline/plugins.html
//
// Consumed by `gekko.js -i -c <this file>` to download poloniex USDT/BTC
// candle history into the local sqlite database.
var config = {
  // ---------------- GENERAL SETTINGS ----------------
  trader: {},
  debug: true, // for additional logging / debugging

  // ---------------- WATCHING A MARKET ----------------
  // see https://gekko.wizb.it/docs/introduction/supported_exchanges.html
  watch: {
    exchange: 'poloniex',
    currency: 'USDT',
    asset: 'BTC',
  },

  // ---------------- STORAGE ----------------
  adapter: 'sqlite',
  sqlite: {
    path: 'plugins/sqlite',
    dataDirectory: 'history',
    version: 0.1,
    journalMode: 'WAL', // setting this to 'DEL' may prevent db locking on windows
    dependencies: [],
  },

  // ---------------- CONFIGURING IMPORTING ----------------
  importer: {
    daterange: {
      // NOTE: these dates are in UTC
      from: "2017-01-01 00:00:00",
    },
  },
  candleWriter: {
    enabled: true,
  },
};

// Gekko's mandatory acknowledgement flag: it must be set to true before
// Gekko will trade. Gekko only automates the strategies YOU configure;
// none of its output is investment advice.
// Not sure? Read this first: https://github.com/askmike/gekko/issues/201
config['I understand that Gekko only automates MY OWN trading strategies'] = false;

module.exports = config;
================================================
FILE: utilities/poloUSDTETH.js
================================================
// Gekko importer configuration. Everything is explained here:
// @link https://gekko.wizb.it/docs/commandline/plugins.html
//
// Consumed by `gekko.js -i -c <this file>` to download poloniex USDT/ETH
// candle history into the local sqlite database.
var config = {
  // ---------------- GENERAL SETTINGS ----------------
  trader: {},
  debug: true, // for additional logging / debugging

  // ---------------- WATCHING A MARKET ----------------
  // see https://gekko.wizb.it/docs/introduction/supported_exchanges.html
  watch: {
    exchange: 'poloniex',
    currency: 'USDT',
    asset: 'ETH',
  },

  // ---------------- STORAGE ----------------
  adapter: 'sqlite',
  sqlite: {
    path: 'plugins/sqlite',
    dataDirectory: 'history',
    version: 0.1,
    journalMode: 'WAL', // setting this to 'DEL' may prevent db locking on windows
    dependencies: [],
  },

  // ---------------- CONFIGURING IMPORTING ----------------
  importer: {
    daterange: {
      // NOTE: these dates are in UTC
      from: "2017-01-01 00:00:00",
    },
  },
  candleWriter: {
    enabled: true,
  },
};

// Gekko's mandatory acknowledgement flag: it must be set to true before
// Gekko will trade. Gekko only automates the strategies YOU configure;
// none of its output is investment advice.
// Not sure? Read this first: https://github.com/askmike/gekko/issues/201
config['I understand that Gekko only automates MY OWN trading strategies'] = false;

module.exports = config;
================================================
FILE: utilities/poloUSDTLTC.js
================================================
// Gekko importer configuration. Everything is explained here:
// @link https://gekko.wizb.it/docs/commandline/plugins.html
//
// Consumed by `gekko.js -i -c <this file>` to download poloniex USDT/LTC
// candle history into the local sqlite database.
var config = {
  // ---------------- GENERAL SETTINGS ----------------
  trader: {},
  debug: true, // for additional logging / debugging

  // ---------------- WATCHING A MARKET ----------------
  // see https://gekko.wizb.it/docs/introduction/supported_exchanges.html
  watch: {
    exchange: 'poloniex',
    currency: 'USDT',
    asset: 'LTC',
  },

  // ---------------- STORAGE ----------------
  adapter: 'sqlite',
  sqlite: {
    path: 'plugins/sqlite',
    dataDirectory: 'history',
    version: 0.1,
    journalMode: 'WAL', // setting this to 'DEL' may prevent db locking on windows
    dependencies: [],
  },

  // ---------------- CONFIGURING IMPORTING ----------------
  importer: {
    daterange: {
      // NOTE: these dates are in UTC
      from: "2017-01-01 00:00:00",
    },
  },
  candleWriter: {
    enabled: true,
  },
};

// Gekko's mandatory acknowledgement flag: it must be set to true before
// Gekko will trade. Gekko only automates the strategies YOU configure;
// none of its output is investment advice.
// Not sure? Read this first: https://github.com/askmike/gekko/issues/201
config['I understand that Gekko only automates MY OWN trading strategies'] = false;

module.exports = config;
================================================
FILE: version.py
================================================
#!/usr/bin/env python3
# Single source of truth for the japonicus release version.
# Shebang fixed: /bin/python rarely exists on modern systems; `env` resolves
# the interpreter through PATH instead.
# NOTE(review): VERSION stays a float for backward compatibility with existing
# consumers; a PEP 440 version string would be preferable long term.
VERSION = 0.92