[
  {
    "path": ".gitignore",
    "content": "\n*.pyc\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2018 Xiang Gao\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "\n# **Playing trading games with deep reinforcement learning**\n\nThis repo is the code for this [paper](https://arxiv.org/abs/1803.03916). Deep reinforcement learing is used to find optimal strategies in these two scenarios:\n* Momentum trading: capture the underlying dynamics\n* Arbitrage trading: utilize the hidden relation among the inputs\n\nSeveral neural networks are compared: \n* Recurrent Neural Networks (GRU/LSTM)\n* Convolutional Neural Network (CNN)\n* Multi-Layer Perception (MLP)\n\n### Dependencies\n\nYou can get all dependencies via the [Anaconda](https://conda.io/docs/user-guide/tasks/manage-environments.html#creating-an-environment-from-an-environment-yml-file) environment file, [env.yml](https://github.com/golsun/deep-RL-time-series/blob/master/env.yml):\n\n    conda env create -f env.yml\n\n### Play with it\nJust call the main function\n\n    python main.py\n\nYou can play with model parameters (specified in main.py), if you get good results or any trouble, please contact me at gxiang1228@gmail.com\n"
  },
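  {
    "path": "examples/quickstart.py",
    "content": "# Hedged sketch, not part of the original paper code: it builds the episode\n# databases that src/main.py expects, mirroring test_SinSampler() and\n# test_PairSampler() in src/sampler.py. The output folder names and the\n# '_A' (in-sample) / '_B' (out-of-sample) suffixes are assumptions taken\n# from the folder names used in src/main.py. Run from the repo root; note\n# that build_db() refuses to overwrite an existing folder.\nimport os, sys\nsys.path.append('src')\nfrom sampler import SinSampler, PairSampler\n\nn_episodes = 100\nfor suffix in ['A', 'B']:\n\tsin = SinSampler('concat_half_base', window_episode=180,\n\t\tnoise_amplitude_ratio=0.5, period_range=(10, 40), amplitude_range=(5, 80))\n\tsin.build_db(n_episodes,\n\t\tos.path.join('data', 'SinSamplerDB', 'concat_half_base_' + suffix))\n\n\tpair = PairSampler('randjump', window_episode=180, forecast_horizon_range=(10, 30),\n\t\tmax_change_perc=30., noise_level=5, n_section=1)\n\tpair.build_db(n_episodes,\n\t\tos.path.join('data', 'PairSamplerDB', 'randjump_100,1(10, 30)[]_' + suffix))\n"
  },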
  {
    "path": "data/PairSamplerDB/randjump_100,1(10, 30)[]_A/param.json",
    "content": "{\"n_episodes\": 100, \"title\": \"randjump(5, (10, 30), 1, [])\", \"window_episode\": 180, \"forecast_horizon_range\": [10, 30], \"max_change_perc\": 30.0, \"noise_level\": 5, \"n_section\": 1, \"n_var\": 2}"
  },
  {
    "path": "data/PairSamplerDB/randjump_100,1(10, 30)[]_B/param.json",
    "content": "{\"n_episodes\": 100, \"title\": \"randjump(5, (10, 30), 1, [])\", \"window_episode\": 180, \"forecast_horizon_range\": [10, 30], \"max_change_perc\": 30.0, \"noise_level\": 5, \"n_section\": 1, \"n_var\": 2}"
  },
  {
    "path": "data/SinSamplerDB/concat_half_base_A/param.json",
    "content": "{\"n_episodes\": 100, \"title\": \"ConcatHalfSin+Base(0.5, (10, 40), (5, 80))\", \"window_episode\": 180, \"noise_amplitude_ratio\": 0.5, \"period_range\": [10, 40], \"amplitude_range\": [5, 80], \"can_half_period\": true}"
  },
  {
    "path": "data/SinSamplerDB/concat_half_base_B/param.json",
    "content": "{\"n_episodes\": 100, \"title\": \"ConcatHalfSin+Base(0.5, (10, 40), (5, 80))\", \"window_episode\": 180, \"noise_amplitude_ratio\": 0.5, \"period_range\": [10, 40], \"amplitude_range\": [5, 80], \"can_half_period\": true}"
  },
  {
    "path": "env.yml",
    "content": "name: drlts\nchannels:\n  - defaults\ndependencies:\n  - ca-certificates=2018.03.07=0\n  - certifi=2018.4.16=py36_0\n  - h5py=2.7.1=py36h39cdac5_0\n  - hdf5=1.10.1=ha036c08_1\n  - intel-openmp=2018.0.0=8\n  - keras=2.1.5=py36_0\n  - libcxx=4.0.1=h579ed51_0\n  - libcxxabi=4.0.1=hebd6815_0\n  - libedit=3.1=hb4e282d_0\n  - libffi=3.2.1=h475c297_4\n  - libgfortran=3.0.1=h93005f0_2\n  - libprotobuf=3.5.2=h2cd40f5_0\n  - mkl=2018.0.2=1\n  - ncurses=6.0=hd04f020_2\n  - numpy=1.12.1=py36h8871d66_1\n  - openssl=1.0.2o=h26aff7b_0\n  - pandas=0.22.0=py36h0a44026_0\n  - pip=9.0.3=py36_0\n  - protobuf=3.5.2=py36h0a44026_0\n  - python=3.6.5=hc167b69_0\n  - python-dateutil=2.7.2=py36_0\n  - pytz=2018.4=py36_0\n  - pyyaml=3.12=py36h2ba1e63_1\n  - readline=7.0=hc1231fa_4\n  - scipy=1.0.1=py36hcaad992_0\n  - setuptools=39.0.1=py36_0\n  - six=1.11.0=py36h0e22d5e_1\n  - sqlite=3.23.1=hf1716c9_0\n  - tensorflow=1.1.0=np112py36_0\n  - tk=8.6.7=h35a86e2_3\n  - werkzeug=0.14.1=py36_0\n  - wheel=0.31.0=py36_0\n  - xz=5.2.3=h0278029_2\n  - yaml=0.1.7=hc338f04_2\n  - zlib=1.2.11=hf3cbc9b_2\n\n"
  },
  {
    "path": "src/agents.py",
    "content": "from lib import *\n\nclass Agent:\n\n\tdef __init__(self, model, \n\t\tbatch_size=32, discount_factor=0.95):\n\n\t\tself.model = model\n\t\tself.batch_size = batch_size\n\t\tself.discount_factor = discount_factor\n\t\tself.memory = []\n\n\n\tdef remember(self, state, action, reward, next_state, done, next_valid_actions):\n\t\tself.memory.append((state, action, reward, next_state, done, next_valid_actions))\n\n\n\tdef replay(self):\n\t\tbatch = random.sample(self.memory, min(len(self.memory), self.batch_size))\n\t\tfor state, action, reward, next_state, done, next_valid_actions in batch:\n\t\t\tq = reward\n\t\t\tif not done:\n\t\t\t\tq += self.discount_factor * np.nanmax(self.get_q_valid(next_state, next_valid_actions))\n\t\t\tself.model.fit(state, action, q)\n\n\n\tdef get_q_valid(self, state, valid_actions):\n\t\tq = self.model.predict(state)\n\t\tq_valid = [np.nan] * len(q)\n\t\tfor action in valid_actions:\n\t\t\tq_valid[action] = q[action]\n\t\treturn q_valid\n\n\n\tdef act(self, state, exploration, valid_actions):\n\t\tif np.random.random() > exploration:\n\t\t\tq_valid = self.get_q_valid(state, valid_actions)\n\t\t\tif np.nanmin(q_valid) != np.nanmax(q_valid):\n\t\t\t\treturn np.nanargmax(q_valid)\n\t\treturn random.sample(valid_actions, 1)[0]\n\n\n\tdef save(self, fld):\n\t\tmakedirs(fld)\n\n\t\tattr = {\n\t\t\t'batch_size':self.batch_size, \n\t\t\t'discount_factor':self.discount_factor, \n\t\t\t#'memory':self.memory\n\t\t\t}\n\n\t\tpickle.dump(attr, open(os.path.join(fld, 'agent_attr.pickle'),'wb'))\n\t\tself.model.save(fld)\n\n\tdef load(self, fld):\n\t\tpath = os.path.join(fld, 'agent_attr.pickle')\n\t\tprint(path)\n\t\tattr = pickle.load(open(path,'rb'))\n\t\tfor k in attr:\n\t\t\tsetattr(self, k, attr[k])\n\t\tself.model.load(fld)\n\n\ndef add_dim(x, shape):\n\treturn np.reshape(x, (1,) + shape)\n\n\n\nclass QModelKeras:\n\t# ref: https://keon.io/deep-q-learning/\n\t\n\tdef init(self):\n\t\tpass\n\n\tdef build_model(self):\n\t\tpass\n\n\tdef __init__(self, state_shape, n_action):\n\t\tself.state_shape = state_shape\n\t\tself.n_action = n_action\n\t\tself.attr2save = ['state_shape','n_action','model_name']\n\t\tself.init()\n\n\n\tdef save(self, fld):\n\t\tmakedirs(fld)\n\t\twith open(os.path.join(fld, 'model.json'), 'w') as json_file:\n\t\t\tjson_file.write(self.model.to_json())\n\t\tself.model.save_weights(os.path.join(fld, 'weights.hdf5'))\n\n\t\tattr = dict()\n\t\tfor a in self.attr2save:\n\t\t\tattr[a] = getattr(self, a)\n\t\tpickle.dump(attr, open(os.path.join(fld, 'Qmodel_attr.pickle'),'wb'))\n\n\tdef load(self, fld, learning_rate):\n\t\tjson_str = open(os.path.join(fld, 'model.json')).read()\n\t\tself.model = keras.models.model_from_json(json_str)\n\t\tself.model.load_weights(os.path.join(fld, 'weights.hdf5'))\n\t\tself.model.compile(loss='mse', optimizer=keras.optimizers.Adam(lr=learning_rate))\n\n\t\tattr = pickle.load(open(os.path.join(fld, 'Qmodel_attr.pickle'), 'rb'))\n\t\tfor a in attr:\n\t\t\tsetattr(self, a, attr[a])\n\n\tdef predict(self, state):\n\t\tq = self.model.predict(\n\t\t\tadd_dim(state, self.state_shape)\n\t\t\t)[0]\n\t\t\n\t\tif np.isnan(max(q)):\n\t\t\tprint('state'+str(state))\n\t\t\tprint('q'+str(q))\n\t\t\traise ValueError\n\n\t\treturn q\n\n\tdef fit(self, state, action, q_action):\n\t\tq = self.predict(state)\n\t\tq[action] = q_action\n\n\t\tself.model.fit(\n\t\t\tadd_dim(state, self.state_shape), \n\t\t\tadd_dim(q, (self.n_action,)), \n\t\t\tepochs=1, verbose=0)\n\n\n\nclass QModelMLP(QModelKeras):\n\t# multi-layer perception 
(MLP), i.e., dense only\n\n\tdef init(self):\n\t\tself.qmodel = 'MLP'\t\n\n\tdef build_model(self, n_hidden, learning_rate, activation='relu'):\n\n\t\tmodel = keras.models.Sequential()\n\t\tmodel.add(keras.layers.Reshape(\n\t\t\t(self.state_shape[0]*self.state_shape[1],), \n\t\t\tinput_shape=self.state_shape))\n\n\t\tfor i in range(len(n_hidden)):\n\t\t\tmodel.add(keras.layers.Dense(n_hidden[i], activation=activation))\n\t\t\t#model.add(keras.layers.Dropout(drop_rate))\n\t\t\n\t\tmodel.add(keras.layers.Dense(self.n_action, activation='linear'))\n\t\tmodel.compile(loss='mse', optimizer=keras.optimizers.Adam(lr=learning_rate))\n\t\tself.model = model\n\t\tself.model_name = self.qmodel + str(n_hidden)\n\t\t\n\n\nclass QModelRNN(QModelKeras):\n\t\"\"\"\n\thttps://keras.io/getting-started/sequential-model-guide/#example\n\tnote param doesn't grow with len of sequence\n\t\"\"\"\n\n\tdef _build_model(self, Layer, n_hidden, dense_units, learning_rate, activation='relu'):\n\n\t\tmodel = keras.models.Sequential()\n\t\tmodel.add(keras.layers.Reshape(self.state_shape, input_shape=self.state_shape))\n\t\tm = len(n_hidden)\n\t\tfor i in range(m):\n\t\t\tmodel.add(Layer(n_hidden[i],\n\t\t\t\treturn_sequences=(i<m-1)))\n\t\tfor i in range(len(dense_units)):\n\t\t\tmodel.add(keras.layers.Dense(dense_units[i], activation=activation))\n\t\tmodel.add(keras.layers.Dense(self.n_action, activation='linear'))\n\t\tmodel.compile(loss='mse', optimizer=keras.optimizers.Adam(lr=learning_rate))\n\t\tself.model = model\n\t\tself.model_name = self.qmodel + str(n_hidden) + str(dense_units)\n\t\t\n\n\nclass QModelLSTM(QModelRNN):\n\tdef init(self):\n\t\tself.qmodel = 'LSTM'\n\tdef build_model(self, n_hidden, dense_units, learning_rate, activation='relu'):\n\t\tLayer = keras.layers.LSTM\n\t\tself._build_model(Layer, n_hidden, dense_units, learning_rate, activation)\n\n\nclass QModelGRU(QModelRNN):\n\tdef init(self):\n\t\tself.qmodel = 'GRU'\n\tdef build_model(self, n_hidden, dense_units, learning_rate, activation='relu'):\n\t\tLayer = keras.layers.GRU\n\t\tself._build_model(Layer, n_hidden, dense_units, learning_rate, activation)\n\n\n\nclass QModelConv(QModelKeras):\n\t\"\"\"\n\tref: https://keras.io/layers/convolutional/\n\t\"\"\"\n\tdef init(self):\n\t\tself.qmodel = 'Conv'\n\n\tdef build_model(self, \n\t\tfilter_num, filter_size, dense_units, \n\t\tlearning_rate, activation='relu', dilation=None, use_pool=None):\n\n\t\tif use_pool is None:\n\t\t\tuse_pool = [True]*len(filter_num)\n\t\tif dilation is None:\n\t\t\tdilation = [1]*len(filter_num)\n\n\t\tmodel = keras.models.Sequential()\n\t\tmodel.add(keras.layers.Reshape(self.state_shape, input_shape=self.state_shape))\n\t\t\n\t\tfor i in range(len(filter_num)):\n\t\t\tmodel.add(keras.layers.Conv1D(filter_num[i], kernel_size=filter_size[i], dilation_rate=dilation[i], \n\t\t\t\tactivation=activation, use_bias=True))\n\t\t\tif use_pool[i]:\n\t\t\t\tmodel.add(keras.layers.MaxPooling1D(pool_size=2))\n\t\t\n\t\tmodel.add(keras.layers.Flatten())\n\t\tfor i in range(len(dense_units)):\n\t\t\tmodel.add(keras.layers.Dense(dense_units[i], activation=activation))\n\t\tmodel.add(keras.layers.Dense(self.n_action, activation='linear'))\n\t\tmodel.compile(loss='mse', optimizer=keras.optimizers.Adam(lr=learning_rate))\n\t\t\n\t\tself.model = model\n\n\t\tself.model_name = self.qmodel + str([a for a in\n\t\t\tzip(filter_num, filter_size, dilation, use_pool)\n\t\t\t])+' + '+str(dense_units)\n\n\t\t\n\nclass 
QModelConvRNN(QModelKeras):\n\t\"\"\"\n\thttps://keras.io/getting-started/sequential-model-guide/#example\n\tnote param doesn't grow with len of sequence\n\t\"\"\"\n\n\tdef _build_model(self, RNNLayer, conv_n_hidden, RNN_n_hidden, dense_units, learning_rate, \n\t\tconv_kernel_size=3, use_pool=False, activation='relu'):\n\n\t\tmodel = keras.models.Sequential()\n\t\tmodel.add(keras.layers.Reshape(self.state_shape, input_shape=self.state_shape))\n\n\t\tfor i in range(len(conv_n_hidden)):\n\t\t\tmodel.add(keras.layers.Conv1D(conv_n_hidden[i], kernel_size=conv_kernel_size, \n\t\t\t\tactivation=activation, use_bias=True))\n\t\t\tif use_pool:\n\t\t\t\tmodel.add(keras.layers.MaxPooling1D(pool_size=2))\n\t\tm = len(RNN_n_hidden)\n\t\tfor i in range(m):\n\t\t\tmodel.add(RNNLayer(RNN_n_hidden[i],\n\t\t\t\treturn_sequences=(i<m-1)))\n\t\tfor i in range(len(dense_units)):\n\t\t\tmodel.add(keras.layers.Dense(dense_units[i], activation=activation))\n\n\t\tmodel.add(keras.layers.Dense(self.n_action, activation='linear'))\n\t\tmodel.compile(loss='mse', optimizer=keras.optimizers.Adam(lr=learning_rate))\n\t\tself.model = model\n\t\tself.model_name = self.qmodel + str(conv_n_hidden) + str(RNN_n_hidden) + str(dense_units)\n\n\nclass QModelConvLSTM(QModelConvRNN):\n\tdef init(self):\n\t\tself.qmodel = 'ConvLSTM'\n\tdef build_model(self, conv_n_hidden, RNN_n_hidden, dense_units, learning_rate, \n\t\tconv_kernel_size=3, use_pool=False, activation='relu'):\n\t\tLayer = keras.layers.LSTM\n\t\tself._build_model(Layer, conv_n_hidden, RNN_n_hidden, dense_units, learning_rate, \n\t\t\tconv_kernel_size, use_pool, activation)\n\n\nclass QModelConvGRU(QModelConvRNN):\n\tdef init(self):\n\t\tself.qmodel = 'ConvGRU'\n\tdef build_model(self, conv_n_hidden, RNN_n_hidden, dense_units, learning_rate, \n\t\tconv_kernel_size=3, use_pool=False, activation='relu'):\n\t\tLayer = keras.layers.GRU\n\t\tself._build_model(Layer, conv_n_hidden, RNN_n_hidden, dense_units, learning_rate, \n\t\t\tconv_kernel_size, use_pool, activation)\n\n\ndef load_model(fld, learning_rate):\n\ts = open(os.path.join(fld,'QModel.txt'),'r').read().strip()\n\tqmodels = {\n\t\t'Conv':QModelConv,\n\t\t'DenseOnly':QModelMLP,\n\t\t'MLP':QModelMLP,\n\t\t'LSTM':QModelLSTM,\n\t\t'GRU':QModelGRU,\n\t\t'ConvLSTM':QModelConvLSTM,\n\t\t'ConvGRU':QModelConvGRU,\n\t\t}\n\tqmodel = qmodels[s](None, None)\n\tqmodel.load(fld, learning_rate)\n\treturn qmodel\n"
  },
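  {
    "path": "examples/qmodel_demo.py",
    "content": "# Hedged sketch, not part of the original paper code: exercises the QModel\n# API from src/agents.py on dummy data, outside any market environment.\n# The shapes follow main.py (window_state=40, one instrument, 3 actions);\n# the hidden sizes and the +0.5 target nudge are purely illustrative.\nimport sys\nsys.path.append('src')\nimport numpy as np\nfrom agents import QModelMLP\n\nstate_shape = (40, 1)\t# (window_state, n_var)\nmodel = QModelMLP(state_shape, n_action=3)\nmodel.build_model(n_hidden=[16, 16], learning_rate=1e-4, activation='tanh')\n\nstate = np.random.random(state_shape)\nq = model.predict(state)\t# one Q-value per action\nmodel.fit(state, action=1, q_action=q[1] + 0.5)\t# raise the target for action 1\nprint('before:', q)\nprint('after: ', model.predict(state))\n"
  },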
  {
    "path": "src/emulator.py",
    "content": "from lib import *\n\n# by Xiang Gao, 2018\n\n\n\ndef find_ideal(p, just_once):\n\tif not just_once:\n\t\tdiff = np.array(p[1:]) - np.array(p[:-1])\n\t\treturn sum(np.maximum(np.zeros(diff.shape), diff))\n\telse:\n\t\tbest = 0.\n\t\ti0_best = None\n\t\tfor i in range(len(p)-1):\n\t\t\tbest = max(best, max(p[i+1:]) - p[i])\n\n\t\treturn best\n\n\nclass Market:\n\t\"\"\"\n\tstate \t\t\tMA of prices, normalized using values at t\n\t\t\t\t\tndarray of shape (window_state, n_instruments * n_MA), i.e., 2D\n\t\t\t\t\twhich is self.state_shape\n\n\taction \t\t\tthree action\n\t\t\t\t\t0:\tempty, don't open/close. \n\t\t\t\t\t1:\topen a position\n\t\t\t\t\t2: \tkeep a position\n\t\"\"\"\n\t\n\tdef reset(self, rand_price=True):\n\t\tself.empty = True\n\t\tif rand_price:\n\t\t\tprices, self.title = self.sampler.sample()\n\t\t\tprice = np.reshape(prices[:,0], prices.shape[0])\n\n\t\t\tself.prices = prices.copy()\n\t\t\tself.price = price/price[0]*100\n\t\t\tself.t_max = len(self.price) - 1\n\n\t\tself.max_profit = find_ideal(self.price[self.t0:], False)\n\t\tself.t = self.t0\n\t\treturn self.get_state(), self.get_valid_actions()\n\n\n\tdef get_state(self, t=None):\n\t\tif t is None:\n\t\t\tt = self.t\n\t\tstate = self.prices[t - self.window_state + 1: t + 1, :].copy()\n\t\tfor i in range(self.sampler.n_var):\n\t\t\tnorm = np.mean(state[:,i])\n\t\t\tstate[:,i] = (state[:,i]/norm - 1.)*100\t\n\t\treturn state\n\n\tdef get_valid_actions(self):\n\t\tif self.empty:\n\t\t\treturn [0, 1]\t# wait, open\n\t\telse:\n\t\t\treturn [0, 2]\t# close, keep\n\n\n\tdef get_noncash_reward(self, t=None, empty=None):\n\t\tif t is None:\n\t\t\tt = self.t\n\t\tif empty is None:\n\t\t\tempty = self.empty\n\t\treward = self.direction * (self.price[t+1] - self.price[t])\n\t\tif empty:\n\t\t\treward -= self.open_cost\n\t\tif reward < 0:\n\t\t\treward *= (1. + self.risk_averse)\n\t\treturn reward\n\n\n\tdef step(self, action):\n\n\t\tdone = False\n\t\tif action == 0:\t\t# wait/close\n\t\t\treward = 0.\n\t\t\tself.empty = True\n\t\telif action == 1:\t# open\n\t\t\treward = self.get_noncash_reward()\n\t\t\tself.empty = False\n\t\telif action == 2:\t# keep\n\t\t\treward = self.get_noncash_reward()\n\t\telse:\n\t\t\traise ValueError('no such action: '+str(action))\n\n\t\tself.t += 1\n\t\treturn self.get_state(), reward, self.t == self.t_max, self.get_valid_actions()\n\n\n\tdef __init__(self, \n\t\tsampler, window_state, open_cost,\n\t\tdirection=1., risk_averse=0.):\n\n\t\tself.sampler = sampler\n\t\tself.window_state = window_state\n\t\tself.open_cost = open_cost\n\t\tself.direction = direction\n\t\tself.risk_averse = risk_averse\n\n\t\tself.n_action = 3\n\t\tself.state_shape = (window_state, self.sampler.n_var)\n\t\tself.action_labels = ['empty','open','keep']\n\t\tself.t0 = window_state - 1\n\n\nif __name__ == '__main__':\n\ttest_env()\n"
  },
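  {
    "path": "examples/env_rollout.py",
    "content": "# Hedged sketch, not part of the original paper code: runs one episode of\n# the Market emulator with uniformly random valid actions, to show the\n# reset()/step() API of src/emulator.py. It assumes the SinSampler database\n# built by examples/quickstart.py exists; run from the repo root.\nimport os, random, sys\nsys.path.append('src')\nfrom sampler import SinSampler\nfrom emulator import Market\n\nsampler = SinSampler('load', fld=os.path.join('data', 'SinSamplerDB', 'concat_half_base_A'))\nenv = Market(sampler, window_state=40, open_cost=3.3)\n\nstate, valid_actions = env.reset()\ndone, total = False, 0.\nwhile not done:\n\taction = random.choice(valid_actions)\t# 0: wait/close, 1: open, 2: keep\n\tstate, reward, done, valid_actions = env.step(action)\n\ttotal += reward\nprint('random policy reward: %.1f (ideal: %.1f)' % (total, env.max_profit))\n"
  },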
  {
    "path": "src/lib.py",
    "content": "import random, os, datetime, pickle, json, keras, sys\nimport pandas as pd\n#import matplotlib.pyplot as plt\nimport numpy as np\n\nOUTPUT_FLD = os.path.join('..','results')\nPRICE_FLD = '/Users/xianggao/Dropbox/distributed/code_db/price coinbase/vm-w7r-db'\n\ndef makedirs(fld):\n\tif not os.path.exists(fld):\n\t\tos.makedirs(fld)\n"
  },
  {
    "path": "src/main.py",
    "content": "#!/usr/bin/env python2\n\nfrom lib import *\nfrom sampler import *\nfrom agents import *\nfrom emulator import *\nfrom simulators import *\nfrom visualizer import *\n\n\ndef get_model(model_type, env, learning_rate, fld_load):\n\n\tprint_t = False\n\texploration_init = 1.\n\n\tif model_type == 'MLP':\n\t\tm = 16\n\t\tlayers = 5\n\t\thidden_size = [m]*layers\n\t\tmodel = QModelMLP(env.state_shape, env.n_action)\n\t\tmodel.build_model(hidden_size, learning_rate=learning_rate, activation='tanh')\n\t\n\telif model_type == 'conv':\n\n\t\tm = 16\n\t\tlayers = 2\n\t\tfilter_num = [m]*layers\n\t\tfilter_size = [3] * len(filter_num)\n\t\t#use_pool = [False, True, False, True]\n\t\t#use_pool = [False, False, True, False, False, True]\n\t\tuse_pool = None\n\t\t#dilation = [1,2,4,8]\n\t\tdilation = None\n\t\tdense_units = [48,24]\n\t\tmodel = QModelConv(env.state_shape, env.n_action)\n\t\tmodel.build_model(filter_num, filter_size, dense_units, learning_rate, \n\t\t\tdilation=dilation, use_pool=use_pool)\n\n\telif model_type == 'RNN':\n\n\t\tm = 32\n\t\tlayers = 3\n\t\thidden_size = [m]*layers\n\t\tdense_units = [m,m]\n\t\tmodel = QModelGRU(env.state_shape, env.n_action)\n\t\tmodel.build_model(hidden_size, dense_units, learning_rate=learning_rate)\n\t\tprint_t = True\n\n\telif model_type == 'ConvRNN':\n\t\n\t\tm = 8\n\t\tconv_n_hidden = [m,m]\n\t\tRNN_n_hidden = [m,m]\n\t\tdense_units = [m,m]\n\t\tmodel = QModelConvGRU(env.state_shape, env.n_action)\n\t\tmodel.build_model(conv_n_hidden, RNN_n_hidden, dense_units, learning_rate=learning_rate)\n\t\tprint_t = True\n\n\telif model_type == 'pretrained':\n\t\tagent.model = load_model(fld_load, learning_rate)\n\n\telse:\n\t\traise ValueError\n\t\t\n\treturn model, print_t\n\n\ndef main():\n\n\t\"\"\"\n\tit is recommended to generate database usng sampler.py before run main\n\t\"\"\"\n\n\tmodel_type = 'conv'; exploration_init = 1.; fld_load = None\n\tn_episode_training = 1000\n\tn_episode_testing = 100\n\topen_cost = 3.3\n\t#db_type = 'SinSamplerDB'; db = 'concat_half_base_'; Sampler = SinSampler\n\tdb_type = 'PairSamplerDB'; db = 'randjump_100,1(10, 30)[]_'; Sampler = PairSampler\n\tbatch_size = 8\n\tlearning_rate = 1e-4\n\tdiscount_factor = 0.8\n\texploration_decay = 0.99\n\texploration_min = 0.01\n\twindow_state = 40\n\n\tfld = os.path.join('..','data',db_type,db+'A')\n\tsampler = Sampler('load', fld=fld)\n\tenv = Market(sampler, window_state, open_cost)\n\tmodel, print_t = get_model(model_type, env, learning_rate, fld_load)\n\tmodel.model.summary()\n\t#return\n\n\tagent = Agent(model, discount_factor=discount_factor, batch_size=batch_size)\n\tvisualizer = Visualizer(env.action_labels)\n\n\tfld_save = os.path.join(OUTPUT_FLD, sampler.title, model.model_name, \n\t\tstr((env.window_state, sampler.window_episode, agent.batch_size, learning_rate,\n\t\t\tagent.discount_factor, exploration_decay, env.open_cost)))\n\t\n\tprint('='*20)\n\tprint(fld_save)\n\tprint('='*20)\n\n\tsimulator = Simulator(agent, env, visualizer=visualizer, fld_save=fld_save)\n\tsimulator.train(n_episode_training, save_per_episode=1, exploration_decay=exploration_decay, \n\t\texploration_min=exploration_min, print_t=print_t, exploration_init=exploration_init)\n\t#agent.model = load_model(os.path.join(fld_save,'model'), learning_rate)\n\n\t#print('='*20+'\\nin-sample testing\\n'+'='*20)\n\tsimulator.test(n_episode_testing, save_per_episode=1, subfld='in-sample testing')\n\n\t\"\"\"\n\tfld = os.path.join('data',db_type,db+'B')\n\tsampler = 
Sampler('load', fld=fld)\n\tsimulator.env.sampler = sampler\n\tsimulator.test(n_episode_testing, save_per_episode=1, subfld='out-of-sample testing')\n\t\"\"\"\n\n\nif __name__ == '__main__':\n\tmain()\n"
  },
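  {
    "path": "examples/eval_saved.py",
    "content": "# Hedged sketch, not part of the original paper code: reloads a trained\n# Q-model and runs out-of-sample testing, based on the commented-out\n# load_model() call in src/main.py. 'REPLACE_WITH_YOUR_RUN_FOLDER' is a\n# placeholder for the fld_save path printed by main.py; window_state and\n# the sampler must match the ones used during training (main.py defaults).\nimport os, sys\nsys.path.append('src')\nfrom sampler import PairSampler\nfrom emulator import Market\nfrom agents import Agent, load_model\nfrom simulators import Simulator\nfrom visualizer import Visualizer\n\nfld_save = os.path.join('results', 'REPLACE_WITH_YOUR_RUN_FOLDER')\nmodel = load_model(os.path.join(fld_save, 'model'), learning_rate=1e-4)\nsampler = PairSampler('load', fld=os.path.join('data', 'PairSamplerDB', 'randjump_100,1(10, 30)[]_B'))\nenv = Market(sampler, window_state=40, open_cost=3.3)\nagent = Agent(model)\nsimulator = Simulator(agent, env, visualizer=Visualizer(env.action_labels), fld_save=fld_save)\nsimulator.test(100, save_per_episode=1, subfld='out-of-sample testing')\n"
  },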
  {
    "path": "src/sampler.py",
    "content": "from lib import *\n\ndef read_data(date, instrument, time_step):\n\tpath = os.path.join(PRICE_FLD, date, instrument+'.csv')\n\tif not os.path.exists(path):\n\t\tprint('no such file: '+path)\n\t\treturn None\n\n\tdf_raw = pd.read_csv(path, parse_dates=['time'], index_col='time')\n\tdf = df_raw.resample(time_step, how='last').fillna(method='ffill')\n\treturn df['spot'].values\n\n\n\nclass Sampler:\n\n\tdef load_db(self, fld):\n\n\t\tself.db = pickle.load(open(os.path.join(fld, 'db.pickle'),'rb'))\n\t\tparam = json.load(open(os.path.join(fld, 'param.json'),'rb'))\n\t\tself.i_db = 0\n\t\tself.n_db = param['n_episodes']\n\t\tself.sample = self.__sample_db\n\t\tfor attr in param:\n\t\t\tif hasattr(self, attr):\n\t\t\t\tsetattr(self, attr, param[attr])\n\t\tself.title = 'DB_'+param['title']\n\n\n\tdef build_db(self, n_episodes, fld):\n\t\tdb = []\n\t\tfor i in range(n_episodes):\n\t\t\tprices, title = self.sample()\n\t\t\tdb.append((prices, '[%i]_'%i+title))\n\t\tos.makedirs(fld)\t# don't overwrite existing fld\n\t\tpickle.dump(db, open(os.path.join(fld, 'db.pickle'),'wb'))\n\t\tparam = {'n_episodes':n_episodes}\n\t\tfor k in self.attrs:\n\t\t\tparam[k] = getattr(self, k)\n\t\tjson.dump(param, open(os.path.join(fld, 'param.json'),'w'))\n\n\n\tdef __sample_db(self):\n\t\tprices, title = self.db[self.i_db]\n\t\tself.i_db += 1\n\t\tif self.i_db == self.n_db:\n\t\t\tself.i_db = 0\n\t\treturn prices, title\n\n\n\nclass PairSampler(Sampler):\n\n\tdef __init__(self, game,\n\t\twindow_episode=None, forecast_horizon_range=None, max_change_perc=10., noise_level=10., n_section=1,\n\t\tfld=None, windows_transform=[]):\n\n\t\tself.window_episode = window_episode\n\t\tself.forecast_horizon_range = forecast_horizon_range\n\t\tself.max_change_perc = max_change_perc\n\t\tself.noise_level = noise_level\n\t\tself.n_section = n_section\n\t\tself.windows_transform = windows_transform\n\t\tself.n_var = 2 + len(self.windows_transform) # price, signal\n\n\t\tself.attrs = ['title', 'window_episode', 'forecast_horizon_range', \n\t\t\t'max_change_perc', 'noise_level', 'n_section', 'n_var']\n\t\tparam_str = str((self.noise_level, self.forecast_horizon_range, self.n_section, self.windows_transform))\n\n\t\tif game == 'load':\n\t\t\tself.load_db(fld)\n\t\telif game in ['randwalk','randjump']:\n\t\t\tself.__rand = getattr(self, '_PairSampler__'+game)\n\t\t\tself.sample = self.__sample\n\t\t\tself.title = game + param_str\n\t\telse:\n\t\t\traise ValueError\n\n\n\tdef __randwalk(self, l):\n\t\tchange = (np.random.random(l + self.forecast_horizon_range[1]) - 0.5) * 2 * self.max_change_perc/100\n\t\tforecast_horizon = random.randrange(self.forecast_horizon_range[0], self.forecast_horizon_range[1])\n\t\treturn change[:l], change[forecast_horizon: forecast_horizon + l], forecast_horizon\n\n\n\tdef __randjump(self, l):\n\t\tchange = [0.] 
* (l + self.forecast_horizon_range[1])\n\t\tn_jump = random.randrange(15,30)\n\t\tfor i in range(n_jump):\n\t\t\tt = random.randrange(len(change))\n\t\t\tchange[t] = (np.random.random() - 0.5) * 2 * self.max_change_perc/100\n\t\tforecast_horizon = random.randrange(self.forecast_horizon_range[0], self.forecast_horizon_range[1])\n\t\treturn change[:l], change[forecast_horizon: forecast_horizon + l], forecast_horizon\n\n\n\n\tdef __sample(self):\n\n\t\tL = self.window_episode\n\t\tif self.windows_transform:\n\t\t\tL += max(self.windows_transform)\n\t\tl0 = L // self.n_section\t# integer length per section (Python 3 division)\n\t\tl1 = L\n\n\t\td_price = []\n\t\td_signal = []\n\t\tforecast_horizon = []\n\n\t\tfor i in range(self.n_section):\n\t\t\tif i == self.n_section - 1:\n\t\t\t\tl = l1\n\t\t\telse:\n\t\t\t\tl = l0\n\t\t\t\tl1 -= l0\n\t\t\td_price_i, d_signal_i, horizon_i = self.__rand(l)\n\t\t\td_price = np.append(d_price, d_price_i)\n\t\t\td_signal = np.append(d_signal, d_signal_i)\n\t\t\tforecast_horizon.append(horizon_i)\n\n\t\tprice = 100. * (1. + np.cumsum(d_price))\n\t\tsignal = 100. * (1. + np.cumsum(d_signal)) + \\\n\t\t\t\tnp.random.random(len(price)) * self.noise_level\n\n\t\tprice += (100 - min(price))\n\t\tsignal += (100 - min(signal))\n\n\t\tinputs = [price[-self.window_episode:], signal[-self.window_episode:]]\n\t\tfor w in self.windows_transform:\n\t\t\tinputs.append(signal[-self.window_episode - w: -w])\n\n\t\treturn np.array(inputs).T, 'forecast_horizon='+str(forecast_horizon)\n\n\n\n\nclass SinSampler(Sampler):\n\n\tdef __init__(self, game, \n\t\twindow_episode=None, noise_amplitude_ratio=None, period_range=None, amplitude_range=None,\n\t\tfld=None):\n\n\t\tself.n_var = 1\t# price only\n\n\t\tself.window_episode = window_episode\n\t\tself.noise_amplitude_ratio = noise_amplitude_ratio\n\t\tself.period_range = period_range\n\t\tself.amplitude_range = amplitude_range\n\t\tself.can_half_period = False\n\n\t\tself.attrs = ['title','window_episode', 'noise_amplitude_ratio', 'period_range', 'amplitude_range', 'can_half_period']\n\n\t\tparam_str = str((\n\t\t\tself.noise_amplitude_ratio, self.period_range, self.amplitude_range\n\t\t\t))\n\t\tif game == 'single':\n\t\t\tself.sample = self.__sample_single_sin\n\t\t\tself.title = 'SingleSin'+param_str\n\t\telif game == 'concat':\n\t\t\tself.sample = self.__sample_concat_sin\n\t\t\tself.title = 'ConcatSin'+param_str\n\t\telif game == 'concat_half':\n\t\t\tself.can_half_period = True\n\t\t\tself.sample = self.__sample_concat_sin\n\t\t\tself.title = 'ConcatHalfSin'+param_str\n\t\telif game == 'concat_half_base':\n\t\t\tself.can_half_period = True\n\t\t\tself.sample = self.__sample_concat_sin_w_base\n\t\t\tself.title = 'ConcatHalfSin+Base'+param_str\n\t\t\tself.base_period_range = (int(2*self.period_range[1]), 4*self.period_range[1])\n\t\t\tself.base_amplitude_range = (20,80)\n\t\telif game == 'load':\n\t\t\tself.load_db(fld)\n\t\telse:\n\t\t\traise ValueError\n\n\n\tdef __rand_sin(self, \n\t\tperiod_range=None, amplitude_range=None, noise_amplitude_ratio=None, full_episode=False):\n\n\t\tif period_range is None:\n\t\t\tperiod_range = self.period_range\n\t\tif amplitude_range is None:\n\t\t\tamplitude_range = self.amplitude_range\n\t\tif noise_amplitude_ratio is None:\n\t\t\tnoise_amplitude_ratio = self.noise_amplitude_ratio\n\n\t\tperiod = random.randrange(period_range[0], period_range[1])\n\t\tamplitude = random.randrange(amplitude_range[0], amplitude_range[1])\n\t\tnoise = noise_amplitude_ratio * amplitude\n\n\t\tif full_episode:\n\t\t\tlength = 
self.window_episode\n\t\telse:\n\t\t\tif self.can_half_period:\n\t\t\t\tlength = int(random.randrange(1,4) * 0.5 * period)\n\t\t\telse:\n\t\t\t\tlength = period\n\n\t\tp = 100. + amplitude * np.sin(np.array(range(length)) * 2 * 3.1416 / period)\n\t\tp += np.random.random(p.shape) * noise\n\n\t\treturn p, '100+%isin((2pi/%i)t)+%ie'%(amplitude, period, noise)\n\n\n\n\n\tdef __sample_concat_sin(self):\n\t\tprices = []\n\t\tp = []\n\t\twhile True:\n\t\t\tp = np.append(p, self.__rand_sin(full_episode=False)[0])\n\t\t\tif len(p) > self.window_episode:\n\t\t\t\tbreak\n\t\tprices.append(p[:self.window_episode])\n\t\treturn np.array(prices).T, 'concat sin'\n\n\tdef __sample_concat_sin_w_base(self):\n\t\tprices = []\n\t\tp = []\n\t\twhile True:\n\t\t\tp = np.append(p, self.__rand_sin(full_episode=False)[0])\n\t\t\tif len(p) > self.window_episode:\n\t\t\t\tbreak\n\t\tbase, base_title = self.__rand_sin(\n\t\t\tperiod_range=self.base_period_range, \n\t\t\tamplitude_range=self.base_amplitude_range, \n\t\t\tnoise_amplitude_ratio=0., \n\t\t\tfull_episode=True)\n\t\tprices.append(p[:self.window_episode] + base)\n\t\treturn np.array(prices).T, 'concat sin + base: '+base_title\n\t\t\t\n\tdef __sample_single_sin(self):\n\t\tprices = []\n\t\tfuncs = []\n\t\tp, func = self.__rand_sin(full_episode=True)\n\t\tprices.append(p)\n\t\tfuncs.append(func)\n\t\treturn np.array(prices).T, str(funcs)\n\n\n\n\n\ndef test_SinSampler():\n\n\twindow_episode = 180\n\twindow_state = 40\n\tnoise_amplitude_ratio = 0.5\n\tperiod_range = (10,40)\n\tamplitude_range = (5,80)\n\tgame = 'concat_half_base'\n\tinstruments = ['fake']\n\n\tsampler = SinSampler(game, \n\t\twindow_episode, noise_amplitude_ratio, period_range, amplitude_range)\n\tn_episodes = 100\n\t\"\"\"\n\tfor i in range(100):\n\t\tplt.plot(sampler.sample(instruments)[0])\n\t\tplt.show()\n\t\t\"\"\"\n\tfld = os.path.join('data','SinSamplerDB',game+'_B')\n\tsampler.build_db(n_episodes, fld)\n\n\n\ndef test_PairSampler():\n\tfhr = (10,30)\n\tn_section = 1\n\tmax_change_perc = 30.\n\tnoise_level = 5\n\tgame = 'randjump'\n\twindows_transform = []\n\n\tsampler = PairSampler(game, window_episode=180, forecast_horizon_range=fhr, \n\t\tn_section=n_section, noise_level=noise_level, max_change_perc=max_change_perc, windows_transform=windows_transform)\n\t\n\t#plt.plot(sampler.sample()[0]);plt.show()\n\t#\"\"\"\n\tn_episodes = 100\n\tfld = os.path.join('data','PairSamplerDB',\n\t\tgame+'_%i,%i'%(n_episodes, n_section)+str(fhr)+str(windows_transform)+'_B')\n\tsampler.build_db(n_episodes, fld)\n\t#\"\"\"\n\n\n\n\nif __name__ == '__main__':\n\t#scan_match()\n\ttest_SinSampler()\n\t#p = [1,2,3,2,1,2,3]\n\t#print find_ideal(p)\n\ttest_PairSampler()\n"
  },
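  {
    "path": "examples/sample_episode.py",
    "content": "# Hedged sketch, not part of the original paper code: draws one synthetic\n# episode from each sampler in src/sampler.py and prints its shape, to show\n# the (window_episode, n_var) arrays that the Market emulator consumes.\n# All parameter values below just mirror the test functions in sampler.py.\nimport sys\nsys.path.append('src')\nfrom sampler import SinSampler, PairSampler\n\nsin = SinSampler('concat_half_base', window_episode=180,\n\tnoise_amplitude_ratio=0.5, period_range=(10, 40), amplitude_range=(5, 80))\nprices, title = sin.sample()\nprint(prices.shape, title)\t# (180, 1): price only\n\npair = PairSampler('randjump', window_episode=180, forecast_horizon_range=(10, 30),\n\tmax_change_perc=30., noise_level=5)\nprices, title = pair.sample()\nprint(prices.shape, title)\t# (180, 2): price and a leading signal\n"
  },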
  {
    "path": "src/simulators.py",
    "content": "from lib import *\n\n\n\nclass Simulator:\n\n\tdef play_one_episode(self, exploration, training=True, rand_price=True, print_t=False):\n\n\t\tstate, valid_actions = self.env.reset(rand_price=rand_price)\n\t\tdone = False\n\t\tenv_t = 0\n\t\ttry:\n\t\t\tenv_t = self.env.t\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\tcum_rewards = [np.nan] * env_t\n\t\tactions = [np.nan] * env_t\n\t\tstates = [None] * env_t\n\t\tprev_cum_rewards = 0.\n\n\t\twhile not done:\n\t\t\tif print_t:\n\t\t\t\tprint(self.env.t)\n    \n\n\t\t\taction = self.agent.act(state, exploration, valid_actions)\n\t\t\tnext_state, reward, done, valid_actions = self.env.step(action)\n\n\t\t\tcum_rewards.append(prev_cum_rewards+reward)\n\t\t\tprev_cum_rewards = cum_rewards[-1]\n\t\t\tactions.append(action)\n\t\t\tstates.append(next_state)\n\n\t\t\tif training:\n\t\t\t\tself.agent.remember(state, action, reward, next_state, done, valid_actions)\n\t\t\t\tself.agent.replay()\n\n\t\t\tstate = next_state\n\n\t\treturn cum_rewards, actions, states\n\n\n\tdef train(self, n_episode, \n\t\tsave_per_episode=10, exploration_decay=0.995, exploration_min=0.01, print_t=False, exploration_init=1.):\n\n\t\tfld_model = os.path.join(self.fld_save,'model')\n\t\tmakedirs(fld_model)\t# don't overwrite if already exists\n\t\twith open(os.path.join(fld_model,'QModel.txt'),'w') as f:\n\t\t\tf.write(self.agent.model.qmodel)\n\n\t\texploration = exploration_init\n\t\tfld_save = os.path.join(self.fld_save,'training')\n\n\t\tmakedirs(fld_save)\n\t\tMA_window = 100\t\t# MA of performance\n\t\tsafe_total_rewards = []\n\t\texplored_total_rewards = []\n\t\texplorations = []\n\t\tpath_record = os.path.join(fld_save,'record.csv')\n\n\t\twith open(path_record,'w') as f:\n\t\t\tf.write('episode,game,exploration,explored,safe,MA_explored,MA_safe\\n')\n\n\t\tfor n in range(n_episode):\n\n\t\t\tprint('\\ntraining...')\n\t\t\texploration = max(exploration_min, exploration * exploration_decay)\n\t\t\texplorations.append(exploration)\n\t\t\texplored_cum_rewards, explored_actions, _ = self.play_one_episode(exploration, print_t=print_t)\n\t\t\texplored_total_rewards.append(100.*explored_cum_rewards[-1]/self.env.max_profit)\n\t\t\tsafe_cum_rewards, safe_actions, _ = self.play_one_episode(0, training=False, rand_price=False, print_t=False)\n\t\t\tsafe_total_rewards.append(100.*safe_cum_rewards[-1]/self.env.max_profit)\n\n\t\t\tMA_total_rewards = np.median(explored_total_rewards[-MA_window:])\n\t\t\tMA_safe_total_rewards = np.median(safe_total_rewards[-MA_window:])\n\n\t\t\tss = [\n\t\t\t\tstr(n), self.env.title.replace(',',';'), '%.1f'%(exploration*100.), \n\t\t\t\t'%.1f'%(explored_total_rewards[-1]), '%.1f'%(safe_total_rewards[-1]),\n\t\t\t\t'%.1f'%MA_total_rewards, '%.1f'%MA_safe_total_rewards,\n\t\t\t\t]\n\t\t\t\n\t\t\twith open(path_record,'a') as f:\n\t\t\t\tf.write(','.join(ss)+'\\n')\n\t\t\t\tprint('\\t'.join(ss))\n\n\t\t\t\n\t\t\tif n%save_per_episode == 0:\n\t\t\t\tprint('saving results...')\n\t\t\t\tself.agent.save(fld_model)\n\n\t\t\t\t\"\"\"\n\t\t\t\tself.visualizer.plot_a_episode(\n\t\t\t\t\tself.env, self.agent.model, \n\t\t\t\t\texplored_cum_rewards, explored_actions,\n\t\t\t\t\tsafe_cum_rewards, safe_actions,\n\t\t\t\t\tos.path.join(fld_save, 'episode_%i.png'%(n)))\n\n\t\t\t\tself.visualizer.plot_episodes(\n\t\t\t\t\texplored_total_rewards, safe_total_rewards, explorations, \n\t\t\t\t\tos.path.join(fld_save, 'total_rewards.png'),\n\t\t\t\t\tMA_window)\n\t\t\t\t\t\"\"\"\n\n\n\n\n\tdef test(self, n_episode, save_per_episode=10, 
subfld='testing'):\n\n\t\tfld_save = os.path.join(self.fld_save, subfld)\n\t\tmakedirs(fld_save)\n\t\tMA_window = 100\t\t# window for the rolling median of performance\n\t\tsafe_total_rewards = []\n\t\tpath_record = os.path.join(fld_save,'record.csv')\n\n\t\twith open(path_record,'w') as f:\n\t\t\tf.write('episode,game,pnl,rel,MA\\n')\n\n\t\tfor n in range(n_episode):\n\t\t\tprint('\\ntesting...')\n\t\t\t\n\t\t\tsafe_cum_rewards, safe_actions, _ = self.play_one_episode(0, training=False, rand_price=True)\n\t\t\tsafe_total_rewards.append(100.*safe_cum_rewards[-1]/self.env.max_profit)\n\t\t\tMA_safe_total_rewards = np.median(safe_total_rewards[-MA_window:])\n\t\t\tss = [str(n), self.env.title.replace(',',';'), \n\t\t\t\t'%.1f'%(safe_cum_rewards[-1]),\n\t\t\t\t'%.1f'%(safe_total_rewards[-1]), \n\t\t\t\t'%.1f'%MA_safe_total_rewards]\n\t\t\t\n\t\t\twith open(path_record,'a') as f:\n\t\t\t\tf.write(','.join(ss)+'\\n')\n\t\t\t\tprint('\\t'.join(ss))\n\n\t\t\t\n\t\t\tif n%save_per_episode == 0:\n\t\t\t\tprint('saving results...')\n\n\t\t\t\t\"\"\"\n\t\t\t\tself.visualizer.plot_a_episode(\n\t\t\t\t\tself.env, self.agent.model, \n\t\t\t\t\t[np.nan]*len(safe_cum_rewards), [np.nan]*len(safe_actions),\n\t\t\t\t\tsafe_cum_rewards, safe_actions,\n\t\t\t\t\tos.path.join(fld_save, 'episode_%i.png'%(n)))\n\n\t\t\t\tself.visualizer.plot_episodes(\n\t\t\t\t\tNone, safe_total_rewards, None, \n\t\t\t\t\tos.path.join(fld_save, 'total_rewards.png'),\n\t\t\t\t\tMA_window)\n\t\t\t\t\t\"\"\"\n\n\n\n\tdef __init__(self, agent, env, \n\t\tvisualizer, fld_save):\n\n\t\tself.agent = agent\n\t\tself.env = env\n\t\tself.visualizer = visualizer\n\t\tself.fld_save = fld_save\n"
  },
  {
    "path": "src/visualizer.py",
    "content": "from lib import *\n\n\n\ndef get_tick_labels(bins, ticks):\n\n\tticklabels = []\n\tfor i in ticks:\n\t\tif i < len(bins):\n\t\t\tticklabels.append('%.2f'%(bins[int(i)]))\n\t\telse:\n\t\t\tticklabels.append('%.2f'%(bins[-1])+'+')\n\n\treturn ticklabels\n\n\n\nclass Visualizer:\n\n\tdef __init__(self, action_labels):\n\t\tself.n_action = len(action_labels)\n\t\tself.action_labels = action_labels\n\n\n\tdef plot_a_episode(self, \n\t\tenv, model,\n\t\texplored_cum_rewards, explored_actions, \n\t\tsafe_cum_rewards, safe_actions,\n\t\tfig_path):\n\n\t\tf, axs = plt.subplots(3,1,sharex=True, figsize=(14,14))\n\t\tax_price, ax_action, ax_Q = axs  \n\n\t\tls = ['-','--']\n\t\tfor i in range(min(2,env.prices.shape[1])):\n\t\t\tp = env.prices[:,i]/env.prices[0,i]*100 - 100\n\t\t\tax_price.plot(p, 'k'+ls[i], label='input%i - 100'%i)\n\n\t\tax_price.plot(explored_cum_rewards, 'b', label='explored P&L')\n\t\tax_price.plot(safe_cum_rewards, 'r', label='safe P&L')\n\t\tax_price.legend(loc='best', frameon=False)\n\t\tax_price.set_title(env.title+', ideal: %.1f, safe: %.1f, explored: %1.f'%(\n\t\t\tenv.max_profit, safe_cum_rewards[-1], explored_cum_rewards[-1]))\n\n\t\tax_action.plot(explored_actions, 'b', label='explored')\n\t\tax_action.plot(safe_actions, 'r', label='safe', linewidth=2)\n\t\tax_action.set_ylim(-0.4, self.n_action-0.6)\n\t\tax_action.set_ylabel('action')\n\t\tax_action.set_yticks(range(self.n_action))\n\t\tax_action.legend(loc='best', frameon=False)\n\t\t\n\t\tstyle = ['k','r','b']\n\t\tqq = []\n\t\tfor t in xrange(env.t0):\n\t\t\tqq.append([np.nan] * self.n_action)\n\t\tfor t in xrange(env.t0, env.t_max):\n\t\t\tqq.append(model.predict(env.get_state(t))) \n\t\tfor i in xrange(self.n_action):\n\t\t\tax_Q.plot([float(qq[t][i]) for t in xrange(len(qq))], \n\t\t\t\tstyle[i], label=self.action_labels[i])\n\t\tax_Q.set_ylabel('Q')\n\t\tax_Q.legend(loc='best', frameon=False)\n\t\tax_Q.set_xlabel('t')\n\n\t\tplt.subplots_adjust(wspace=0.4)\n\t\tplt.savefig(fig_path)\n\t\tplt.close()\n\n\n\n\tdef plot_episodes(self, \n\t\texplored_total_rewards, safe_total_rewards, explorations, \n\t\tfig_path, MA_window=100):\n\n\t\tf = plt.figure(figsize=(14,10))\t# width, height in inch (100 pixel)\n\t\tif explored_total_rewards is None:\n\t\t\tf, ax_reward = plt.subplots()\n\t\telse:\n\t\t\tfigshape = (3,1)\n\t\t\tax_reward = plt.subplot2grid(figshape, (0, 0), rowspan=2)\n\t\t\tax_exploration = plt.subplot2grid(figshape, (2, 0), sharex=ax_reward)\n\n\t\ttt = range(len(safe_total_rewards))\n\n\t\tif explored_total_rewards is not None:\n\t\t\tma = pd.rolling_median(np.array(explored_total_rewards), window=MA_window, min_periods=1)\n\t\t\tstd = pd.rolling_std(np.array(explored_total_rewards), window=MA_window, min_periods=3)\n\t\t\tax_reward.plot(tt, explored_total_rewards,'bv', fillstyle='none')\n\t\t\tax_reward.plot(tt, ma, 'b', label='explored ma', linewidth=2)\n\t\t\tax_reward.plot(tt, std, 'b--', label='explored std', linewidth=2)\n\n\t\tma = pd.rolling_median(np.array(safe_total_rewards), window=MA_window, min_periods=1)\n\t\tstd = pd.rolling_std(np.array(safe_total_rewards), window=MA_window, min_periods=3)\n\t\tax_reward.plot(tt, safe_total_rewards,'ro', fillstyle='none')\n\t\tax_reward.plot(tt, ma,'r', label='safe ma', linewidth=2)\n\t\tax_reward.plot(tt, std,'r--', label='safe std', linewidth=2)\n\n\t\tax_reward.axhline(y=0, color='k', linestyle=':')\n\t\t#ax_reward.axhline(y=60, color='k', linestyle=':')\n\t\tax_reward.set_ylabel('total reward')\n\t\tax_reward.legend(loc='best', 
frameon=False)\n\t\tax_reward.yaxis.tick_right()\n\t\tylim = ax_reward.get_ylim()\n\t\tax_reward.set_ylim((max(-100,ylim[0]), min(100,ylim[1])))\n\n\t\tif explored_total_rewards is not None:\n\t\t\tax_exploration.plot(tt, np.array(explorations)*100., 'k')\n\t\t\tax_exploration.set_ylabel('exploration')\n\t\t\tax_exploration.set_xlabel('episode')\n\n\t\tplt.savefig(fig_path)\n\t\tplt.close()\n\n\n\n\ndef test_visualizer():\n\n\tf = plt.figure()#figsize=(5,8))\n\taxs_action = []\n\tncol = 3\n\tnrow = 2\n\n\tclim = (0,1)\n\n\tax = plt.subplot2grid((nrow, ncol), (0,ncol-1))\n\tax.matshow(np.random.random((2,2)), cmap='RdYlBu_r', clim=clim)\n\n\tfor action in range(3):\n\t\trow = 1 + action//ncol\t# integer division: subplot indices must be ints\n\t\tcol = action%ncol\n\t\tax = plt.subplot2grid((nrow, ncol), (row,col))\n\t\tcax = ax.matshow(np.random.random((2,2)), cmap='RdYlBu_r', clim=clim)\n\n\tax = plt.subplot2grid((nrow, ncol), (0,0), colspan=ncol-1)\n\tcbar = f.colorbar(cax, ax=ax)\n\n\tplt.show()\n\n\n\n\nclass VisualizerSequential:\n\n\tdef config(self):\n\t\tpass\n\n\tdef __init__(self, model):\n\t\tself.model = model\n\t\tself.layers = []\n\t\tfor layer in self.model.layers:\n\t\t\tself.layers.append(str(layer.name))\n\n\t\t# one sub-model per layer, mapping the input to that layer's output\n\t\tself.inter_models = dict()\n\t\tmodel_input = self.model.input\n\t\tfor layer in self.layers:\n\t\t\tself.inter_models[layer] = keras.models.Model(\n\t\t\t\tinputs=model_input,\n\t\t\t\toutputs=self.model.get_layer(layer).output)\n\t\tself.config()\n\n\n\nclass VisualizerConv1D(VisualizerSequential):\n\n\tdef config(self):\n\n\t\tself.n_channel = int(self.model.input.shape[2])\n\t\tn_col = self.n_channel\n\t\tfor layer in self.layers:\n\t\t\tshape = self.inter_models[layer].output.shape\n\t\t\tif len(shape) == 3:\n\t\t\t\tn_col = max(n_col, int(shape[2]))\n\n\t\tself.figshape = (len(self.layers)+1, n_col)\n\n\n\tdef plot(self, x):\n\n\t\tf = plt.figure(figsize=(30,30))\n\n\t\tfor i in range(self.n_channel):\n\t\t\tax = plt.subplot2grid(self.figshape, (0,i))\n\t\t\tax.plot(x[0,:,i], '.-')\n\t\t\tax.set_title('input, channel %i'%i)\n\n\t\tfor i_layer in range(len(self.layers)):\n\t\t\tlayer = self.layers[i_layer]\n\t\t\tz = self.inter_models[layer].predict(x)\n\t\t\tprint('plotting '+layer)\n\t\t\tif len(z.shape) == 3:\n\t\t\t\tfor i in range(z.shape[2]):\n\t\t\t\t\tax = plt.subplot2grid(self.figshape, (i_layer+1, i))\n\t\t\t\t\tax.plot(z[0,:,i], '.-')\n\t\t\t\t\tax.set_title(layer+' filter %i'%i)\n\t\t\telse:\n\t\t\t\tax = plt.subplot2grid(self.figshape, (i_layer+1, 0))\n\t\t\t\tax.plot(z[0,:], '.-')\n\t\t\t\tax.set_title(layer)\n\n\t\tax.set_ylim(-100,100)\n\n\n\tdef print_w(self):\n\t\tlayer = self.layers[0]\n\t\tww = self.inter_models[layer].get_weights()\n\t\tfor w in ww:\n\t\t\tprint(w.shape)\n\t\t\tprint(w)\n"
  }
]