[
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nenv/\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\n*.egg-info/\n.installed.cfg\n*.egg\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*,cover\n.hypothesis/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# IPython Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# dotenv\n.env\n\n# virtualenv\nvenv/\nENV/\n\n# Spyder project settings\n.spyderproject\n\n# Rope project settings\n.ropeproject\n\n*.sublime*\nMNIST_data/"
  },
  {
    "path": "LICENSE",
    "content": "The MIT License (MIT)\n\nCopyright (c) 2016 Paras Dahal\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."
  },
  {
    "path": "README.md",
    "content": "# deepnet\n\nImplementations of CNNs, RNNs and cool new techniques in deep learning\n\nNote: deepnet is a work in progress and things will be added gradually. It is not intended for production, use it to learn and study implementations of latest and greatest in deep learning.\n\n## What does it have?\n\n**Network Architecture**\n1. Convolutional net\n2. Feed forward net\n3. Recurrent net (LSTM/GRU coming soon)\n\n**Optimization Algorithms**\n1. SGD\n2. SGD with momentum\n3. Nesterov Accelerated Gradient\n4. Adagrad\n5. RMSprop\n6. Adam\n\n**Regularization**\n1. Dropout\n2. L1 and L2 Regularization\n\n**Cool Techniques**\n\n1. BatchNorm\n2. Xavier Weight Initialization\n\n**Nonlinearities**\n1. ReLU\n2. Sigmoid\n3. tanh\n\n\n## Usage\n\n1. ```virtualenv .env``` ; create a virtual environment\n2. ```source .env/bin/activate``` ; activate the virtual environment\n3. ```pip install -r requirements.txt``` ; Install dependencies\n4. ```python run_cnn.py {mnist|cifar10}``` ; mnist for shallow cnn and cifar10 for deep cnn"
  },
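  {
    "path": "examples/minimal_forward_backward.py",
    "content": "# Added illustration, not an original repo file: a minimal sketch of the\n# forward/backward contract that every layer in deepnet/layers.py follows.\n# forward(X) returns the activation; backward(dout) returns (dX, grads),\n# where grads pairs up with layer.params. Run from the repo root.\nimport numpy as np\nfrom deepnet.layers import FullyConnected, ReLU\n\nnp.random.seed(0)\nX = np.random.randn(4, 10)         # a batch of 4 inputs with 10 features\nfc = FullyConnected(10, 3)\nrelu = ReLU()\n\nout = relu.forward(fc.forward(X))  # forward pass through both layers\ndout = np.ones_like(out)           # pretend upstream gradient\n\nd_relu, _ = relu.backward(dout)    # ReLU has no parameters\ndX, (dW, db) = fc.backward(d_relu)\n\nprint('out shape:', out.shape)     # (4, 3)\nprint('dW shape:', dW.shape)       # matches fc.W: (10, 3)\nprint('db shape:', db.shape)       # (3,)\n"
  },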
  {
    "path": "deepnet/Gradient Checking.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Numerical Gradient checking of Layers\\n\",\n    \"\\n\",\n    \"Verify the correctness of implementation using Gradient checks provided in CS231 2nd assignment.\\n\",\n    \"\\n\",\n    \"1. **Probably Wrong**: relative error > 1e-2 \\n\",\n    \"2. **Something not right** :1e-2 > relative error > 1e-4 \\n\",\n    \"3. **Okay for objectives with kinks**: 1e-4 > relative error, if no kinks then too high\\n\",\n    \"4. **Most likely Right**: relative error < 1e-7 \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 1,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"from layers import *\\n\",\n    \"from loss import SoftmaxLoss\\n\",\n    \"from nnet import NeuralNet\\n\",\n    \"from solver import sgd,sgd_momentum,adam\\n\",\n    \"import sys\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Numerical Gradient Functions\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 2,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"def rel_error(x, y):\\n\",\n    \"  \\\"\\\"\\\" returns relative error \\\"\\\"\\\"\\n\",\n    \"  return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))\\n\",\n    \"\\n\",\n    \"def numerical_gradient_array(f, x, df, h=1e-5):\\n\",\n    \"  \\\"\\\"\\\"\\n\",\n    \"  Evaluate a numeric gradient for a function that accepts a numpy\\n\",\n    \"  array and returns a numpy array.\\n\",\n    \"  \\\"\\\"\\\"\\n\",\n    \"  grad = np.zeros_like(x)\\n\",\n    \"  it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\\n\",\n    \"  while not it.finished:\\n\",\n    \"\\n\",\n    \"    ix = it.multi_index\\n\",\n    \"    oldval = x[ix]\\n\",\n    \"    x[ix] = oldval + h\\n\",\n    \"    pos = f(x).copy()\\n\",\n    \"    x[ix] = oldval - h\\n\",\n    \"    neg = f(x).copy()\\n\",\n    \"    x[ix] = oldval\\n\",\n    \"\\n\",\n    \"    grad[ix] = np.sum((pos - neg) * df) / (2 * h)\\n\",\n    \"\\n\",\n    \"    it.iternext()\\n\",\n    \"  return grad\\n\",\n    \"\\n\",\n    \"def eval_numerical_gradient(f, x, verbose=True, h=0.00001):\\n\",\n    \"  \\\"\\\"\\\"\\n\",\n    \"  a naive implementation of numerical gradient of f at x\\n\",\n    \"  - f should be a function that takes a single argument\\n\",\n    \"  - x is the point (numpy array) to evaluate the gradient at\\n\",\n    \"  \\\"\\\"\\\"\\n\",\n    \"\\n\",\n    \"  fx = f(x) # evaluate function value at original point\\n\",\n    \"\\n\",\n    \"  grad = np.zeros_like(x)\\n\",\n    \"  # iterate over all indexes in x\\n\",\n    \"  it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\\n\",\n    \"  while not it.finished:\\n\",\n    \"    # evaluate function at x+h\\n\",\n    \"    ix = it.multi_index\\n\",\n    \"    oldval = x[ix]\\n\",\n    \"    x[ix] = oldval + h # increment by h\\n\",\n    \"    fxph = f(x) # evalute f(x + h)\\n\",\n    \"    x[ix] = oldval - h\\n\",\n    \"    fxmh = f(x) # evaluate f(x - h)\\n\",\n    \"    x[ix] = oldval # restore\\n\",\n    \"\\n\",\n    \"    # compute the partial derivative with centered formula\\n\",\n    \"    grad[ix] = (fxph - fxmh) / (2 * h) # the slope\\n\",\n    \"    if verbose:\\n\",\n    \"      print(ix, grad[ix])\\n\",\n    \"    it.iternext() # step to next 
dimension\\n\",\n    \"\\n\",\n    \"  return grad\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Convolution Layer\\n\",\n    \"\\n\",\n    \"Perform numerical gradient checking to verify the implementation of the convolution layer.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Forward Pass\\n\",\n    \"\\n\",\n    \"The difference between correct_out and out should be around 1e-8\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 3,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Testing forward pass of Conv Layer\\n\",\n      \"Difference:  2.21214764967e-08\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"x_shape = (2, 3, 4, 4)\\n\",\n    \"w_shape = (3, 3, 4, 4)\\n\",\n    \"x = np.linspace(-0.1, 0.5, num=np.prod(x_shape)).reshape(x_shape)\\n\",\n    \"w = np.linspace(-0.2, 0.3, num=np.prod(w_shape)).reshape(w_shape)\\n\",\n    \"b = np.linspace(-0.1, 0.2, num=3)\\n\",\n    \"\\n\",\n    \"c_layer = Conv((3,4,4),n_filter=3,h_filter=4,w_filter=4,stride=2,padding=1)\\n\",\n    \"c_layer.W = w\\n\",\n    \"c_layer.b = b.reshape(-1,1)\\n\",\n    \"\\n\",\n    \"correct_out = np.array([[[[-0.08759809, -0.10987781],\\n\",\n    \"                           [-0.18387192, -0.2109216 ]],\\n\",\n    \"                          [[ 0.21027089,  0.21661097],\\n\",\n    \"                           [ 0.22847626,  0.23004637]],\\n\",\n    \"                          [[ 0.50813986,  0.54309974],\\n\",\n    \"                           [ 0.64082444,  0.67101435]]],\\n\",\n    \"                         [[[-0.98053589, -1.03143541],\\n\",\n    \"                           [-1.19128892, -1.24695841]],\\n\",\n    \"                          [[ 0.69108355,  0.66880383],\\n\",\n    \"                           [ 0.59480972,  0.56776003]],\\n\",\n    \"                          [[ 2.36270298,  2.36904306],\\n\",\n    \"                           [ 2.38090835,  2.38247847]]]])\\n\",\n    \"\\n\",\n    \"out = c_layer.forward(x)\\n\",\n    \"\\n\",\n    \"error = rel_error(out,correct_out)\\n\",\n    \"print(\\\"Testing forward pass of Conv Layer\\\")\\n\",\n    \"print(\\\"Difference: \\\",error)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Backward Pass\\n\",\n    \"\\n\",\n    \"The errors for the gradients should be around 1e-9\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 4,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Testing backward pass of Conv Layer\\n\",\n      \"dX error:  6.30285589596e-09\\n\",\n      \"dW error:  3.66468373932e-10\\n\",\n      \"db error:  6.8390384471e-12\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"x = np.random.randn(4, 3, 5, 5)\\n\",\n    \"w = np.random.randn(2, 3, 3, 3)\\n\",\n    \"b = np.random.randn(2,).reshape(-1,1)\\n\",\n    \"dout = np.random.randn(4, 2, 5, 5)\\n\",\n    \"\\n\",\n    \"c_layer = Conv((3,5,5),n_filter=2,h_filter=3,w_filter=3,stride=1,padding=1)\\n\",\n    \"c_layer.W = w\\n\",\n    \"c_layer.b = b\\n\",\n    \"\\n\",\n    \"dx_num = numerical_gradient_array(lambda x: c_layer.forward(x), x, dout)\\n\",\n    \"dw_num = numerical_gradient_array(lambda w: c_layer.forward(x), w, dout)\\n\",\n    
\"db_num = numerical_gradient_array(lambda b: c_layer.forward(x), b, dout)\\n\",\n    \"\\n\",\n    \"out = c_layer.forward(x)\\n\",\n    \"dx,grads = c_layer.backward(dout)\\n\",\n    \"dw,db = grads\\n\",\n    \"\\n\",\n    \"print(\\\"Testing backward pass of Conv Layer\\\")\\n\",\n    \"print(\\\"dX error: \\\",rel_error(dx,dx_num))\\n\",\n    \"print(\\\"dW error: \\\",rel_error(dw,dw_num))\\n\",\n    \"print(\\\"db error: \\\",rel_error(db,db_num))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Maxpool Layer\\n\",\n    \"\\n\",\n    \"Perform gradient check for maxpool layer and verify correctness of its implementation\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Forward Pass\\n\",\n    \"\\n\",\n    \"Difference should be around 1e-8\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 5,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Testing max_pool_forward_naive function:\\n\",\n      \"difference:  4.16666651573e-08\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"x_shape = (2, 3, 4, 4)\\n\",\n    \"x = np.linspace(-0.3, 0.4, num=np.prod(x_shape)).reshape(x_shape)\\n\",\n    \"\\n\",\n    \"pool = Maxpool((3,4,4),size=2,stride=2)\\n\",\n    \"\\n\",\n    \"out = pool.forward(x,)\\n\",\n    \"correct_out = np.array([[[[-0.26315789, -0.24842105],\\n\",\n    \"                          [-0.20421053, -0.18947368]],\\n\",\n    \"                         [[-0.14526316, -0.13052632],\\n\",\n    \"                          [-0.08631579, -0.07157895]],\\n\",\n    \"                         [[-0.02736842, -0.01263158],\\n\",\n    \"                          [ 0.03157895,  0.04631579]]],\\n\",\n    \"                        [[[ 0.09052632,  0.10526316],\\n\",\n    \"                          [ 0.14947368,  0.16421053]],\\n\",\n    \"                         [[ 0.20842105,  0.22315789],\\n\",\n    \"                          [ 0.26736842,  0.28210526]],\\n\",\n    \"                         [[ 0.32631579,  0.34105263],\\n\",\n    \"                          [ 0.38526316,  0.4       ]]]])\\n\",\n    \"\\n\",\n    \"print('Testing max_pool_forward_naive function:')\\n\",\n    \"print('difference: ', rel_error(out, correct_out))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Backward Pass\\n\",\n    \"\\n\",\n    \"Error should be around 1e-12\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 6,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Testing bacward pass of Maxpool layer\\n\",\n      \"dX error:  3.27561819731e-12\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"x = np.random.randn(3, 2, 8, 8)\\n\",\n    \"dout = np.random.randn(3, 2, 4, 4)\\n\",\n    \"\\n\",\n    \"pool = Maxpool((2,8,8),size=2,stride=2)\\n\",\n    \"\\n\",\n    \"dx_num = numerical_gradient_array(lambda x: pool.forward(x), x, dout)\\n\",\n    \"\\n\",\n    \"out = pool.forward(x)\\n\",\n    \"dx,_ = pool.backward(dout)\\n\",\n    \"\\n\",\n    \"print('Testing bacward pass of Maxpool layer')\\n\",\n    \"print('dX error: ', rel_error(dx, dx_num))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## ReLU 
Layer\\n\",\n    \"Error should be around 1e-12\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 7,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Testing backward pass of ReLU layer\\n\",\n      \"dX error:  3.275621976e-12\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"x = np.random.randn(3, 2, 8, 8)\\n\",\n    \"dout = np.random.randn(3, 2, 8, 8)\\n\",\n    \"\\n\",\n    \"r = ReLU()\\n\",\n    \"\\n\",\n    \"dx_num = numerical_gradient_array(lambda x:r.forward(x), x, dout)\\n\",\n    \"\\n\",\n    \"out = r.forward(x)\\n\",\n    \"dx,_ = r.backward(dout)\\n\",\n    \"\\n\",\n    \"print('Testing backward pass of ReLU layer')\\n\",\n    \"print('dX error: ',rel_error(dx,dx_num))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Conv-ReLU-MaxPool\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 8,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Testing conv_relu_pool\\n\",\n      \"dx error:  1.01339343448e-08\\n\",\n      \"dw error:  7.41563088659e-10\\n\",\n      \"db error:  7.51304173633e-11\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"x = np.random.randn(2, 3, 16, 16)\\n\",\n    \"w = np.random.randn(3, 3, 3, 3)\\n\",\n    \"b = np.random.randn(3,).reshape(-1,1)\\n\",\n    \"dout = np.random.randn(2, 3, 8, 8)\\n\",\n    \"\\n\",\n    \"c = Conv((3,16,16),n_filter=3,h_filter=3,w_filter=3,stride=1,padding=1)\\n\",\n    \"c.W, c.b = w, b\\n\",\n    \"r = ReLU()\\n\",\n    \"m = Maxpool(c.out_dim,size=2,stride=2)\\n\",\n    \"\\n\",\n    \"def conv_relu_pool_forward(c,r,m,x):\\n\",\n    \"    c_out = c.forward(x)\\n\",\n    \"    r_out = r.forward(c_out)\\n\",\n    \"    m_out = m.forward(r_out)\\n\",\n    \"    return m_out\\n\",\n    \"\\n\",\n    \"dx_num = numerical_gradient_array(lambda x: conv_relu_pool_forward(c,r,m,x), x, dout)\\n\",\n    \"dw_num = numerical_gradient_array(lambda w: conv_relu_pool_forward(c,r,m,x), w, dout)\\n\",\n    \"db_num = numerical_gradient_array(lambda b: conv_relu_pool_forward(c,r,m,x), b, dout)\\n\",\n    \"\\n\",\n    \"m_dx,_ = m.backward(dout)\\n\",\n    \"r_dx,_ = r.backward(m_dx)\\n\",\n    \"dx,grads = c.backward(r_dx)\\n\",\n    \"dw,db = grads\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"print('Testing conv_relu_pool')\\n\",\n    \"print('dx error: ', rel_error(dx_num, dx))\\n\",\n    \"print('dw error: ', rel_error(dw_num, dw))\\n\",\n    \"print('db error: ', rel_error(db_num, db))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Fully Connected Layer\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 9,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"[[ 1.49834967  1.70660132  1.91485297]\\n\",\n      \" [ 3.25553199  3.5141327   3.77273342]]\\n\",\n      \"Testing fully connected forward pass:\\n\",\n      \"difference:  9.76985004799e-10\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"num_inputs = 2\\n\",\n    \"input_shape = (4, 5, 6)\\n\",\n    \"output_dim = 3\\n\",\n    \"\\n\",\n    \"input_size = num_inputs * np.prod(input_shape)\\n\",\n    \"weight_size = output_dim * 
np.prod(input_shape)\\n\",\n    \"\\n\",\n    \"x = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, *input_shape)\\n\",\n    \"w = np.linspace(-0.2, 0.3, num=weight_size).reshape(np.prod(input_shape), output_dim)\\n\",\n    \"b = np.linspace(-0.3, 0.1, num=output_dim).reshape(1,-1)\\n\",\n    \"\\n\",\n    \"flat = Flatten()\\n\",\n    \"x = flat.forward(x)\\n\",\n    \"\\n\",\n    \"f = FullyConnected(120,3)\\n\",\n    \"f.W,f.b= w,b\\n\",\n    \"out = f.forward(x)\\n\",\n    \"\\n\",\n    \"correct_out = np.array([[ 1.49834967,  1.70660132,  1.91485297],\\n\",\n    \"                        [ 3.25553199,  3.5141327,   3.77273342]])\\n\",\n    \"\\n\",\n    \"print(out)\\n\",\n    \"# Compare your output with ours. The error should be around 1e-9.\\n\",\n    \"print('Testing fully connected forward pass:')\\n\",\n    \"print('difference: ', rel_error(out, correct_out))\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 10,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Testing fully connected backward pass:\\n\",\n      \"dx error:  2.89903091526e-09\\n\",\n      \"dw error:  1.32127575542e-09\\n\",\n      \"db error:  1.03150657456e-11\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"x = np.random.randn(10, 2, 3)\\n\",\n    \"w = np.random.randn(6, 5)\\n\",\n    \"b = np.random.randn(5)\\n\",\n    \"dout = np.random.randn(10, 5)\\n\",\n    \"\\n\",\n    \"flat = Flatten()\\n\",\n    \"x = flat.forward(x)\\n\",\n    \"\\n\",\n    \"f = FullyConnected(60,5)\\n\",\n    \"f.W,f.b= w,b\\n\",\n    \"\\n\",\n    \"dx_num = numerical_gradient_array(lambda x: f.forward(x), x, dout)\\n\",\n    \"dw_num = numerical_gradient_array(lambda w: f.forward(x), w, dout)\\n\",\n    \"db_num = numerical_gradient_array(lambda b: f.forward(x), b, dout)\\n\",\n    \"\\n\",\n    \"dx,grads= f.backward(dout)\\n\",\n    \"dw, db = grads\\n\",\n    \"# The error should be around 1e-10\\n\",\n    \"print('Testing fully connected backward pass:')\\n\",\n    \"print('dx error: ', rel_error(dx_num, dx))\\n\",\n    \"print('dw error: ', rel_error(dw_num, dw))\\n\",\n    \"print('db error: ', rel_error(db_num, db))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Softmax Loss\\n\",\n    \"\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 11,\n   \"metadata\": {\n    \"collapsed\": false\n   },\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Testing SoftmaxLoss:\\n\",\n      \"loss:  2.30283790984\\n\",\n      \"dx error:  1.05396983612e-08\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"num_classes, num_inputs = 10, 50\\n\",\n    \"x = 0.001 * np.random.randn(num_inputs, num_classes)\\n\",\n    \"y = np.random.randint(num_classes, size=num_inputs)\\n\",\n    \"\\n\",\n    \"dx_num = eval_numerical_gradient(lambda x: SoftmaxLoss(x,y)[0], x,verbose=False)\\n\",\n    \"loss,dx = SoftmaxLoss(x,y)\\n\",\n    \"\\n\",\n    \"# Test softmax_loss function. 
Loss should be 2.3 and dx error should be 1e-8\\n\",\n    \"print('Testing SoftmaxLoss:')\\n\",\n    \"print('loss: ', loss)\\n\",\n    \"print('dx error: ', rel_error(dx_num, dx))\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.6.0\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n"
  },
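  {
    "path": "examples/gradient_check_sigmoid.py",
    "content": "# Added illustration, not an original repo file: a small, self-contained\n# gradient check in the spirit of the notebook above. The helpers below\n# mirror rel_error / numerical_gradient_array defined there.\nimport numpy as np\nfrom deepnet.layers import sigmoid\n\n\ndef rel_error(x, y):\n    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))\n\n\ndef numerical_gradient_array(f, x, df, h=1e-5):\n    # centered finite differences, one coordinate at a time\n    grad = np.zeros_like(x)\n    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n    while not it.finished:\n        ix = it.multi_index\n        oldval = x[ix]\n        x[ix] = oldval + h\n        pos = f(x).copy()\n        x[ix] = oldval - h\n        neg = f(x).copy()\n        x[ix] = oldval\n        grad[ix] = np.sum((pos - neg) * df) / (2 * h)\n        it.iternext()\n    return grad\n\n\nnp.random.seed(0)\nx = np.random.randn(5, 4)\ndout = np.random.randn(5, 4)\n\ns = sigmoid()\ndx_num = numerical_gradient_array(lambda x: s.forward(x), x, dout)\nout = s.forward(x)\ndx, _ = s.backward(dout)\n\n# the relative error should be around 1e-10 or smaller\nprint('sigmoid dX error:', rel_error(dx, dx_num))\n"
  },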
  {
    "path": "deepnet/im2col.py",
    "content": "import numpy as np\n\n\ndef get_im2col_indices(x_shape, field_height=3, field_width=3, padding=1, stride=1):\n  # First figure out what the size of the output should be\n  N, C, H, W = x_shape\n  assert (H + 2 * padding - field_height) % stride == 0\n  assert (W + 2 * padding - field_height) % stride == 0\n  out_height = (H + 2 * padding - field_height) / stride + 1\n  out_width = (W + 2 * padding - field_width) / stride + 1\n\n  i0 = np.repeat(np.arange(field_height,dtype='int32'), field_width)\n  i0 = np.tile(i0, C)\n  i1 = stride * np.repeat(np.arange(out_height,dtype='int32'), out_width)\n  j0 = np.tile(np.arange(field_width), field_height * C)\n  j1 = stride * np.tile(np.arange(out_width,dtype='int32'), int(out_height))\n  i = i0.reshape(-1, 1) + i1.reshape(1, -1)\n  j = j0.reshape(-1, 1) + j1.reshape(1, -1)\n\n  k = np.repeat(np.arange(C,dtype='int32'), field_height * field_width).reshape(-1, 1)\n\n  return (k, i, j)\n\ndef im2col_indices(x, field_height=3, field_width=3, padding=1, stride=1):\n  \"\"\" An implementation of im2col based on some fancy indexing \"\"\"\n  # Zero-pad the input\n  p = padding\n  x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')\n\n  k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding,\n                               stride)\n\n  cols = x_padded[:, k, i, j]\n  C = x.shape[1]\n  cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)\n  return cols\n\n\ndef col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1,\n                   stride=1):\n  \"\"\" An implementation of col2im based on fancy indexing and np.add.at \"\"\"\n  N, C, H, W = x_shape\n  H_padded, W_padded = H + 2 * padding, W + 2 * padding\n  x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)\n  k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding,\n                               stride)\n  cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)\n  cols_reshaped = cols_reshaped.transpose(2, 0, 1)\n  np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)\n  if padding == 0:\n    return x_padded\n  return x_padded[:, :, padding:-padding, padding:-padding]\n\npass"
  },
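  {
    "path": "examples/im2col_demo.py",
    "content": "# Added illustration, not an original repo file: what im2col_indices\n# produces. Each column of the result is one flattened receptive field,\n# so convolution reduces to a single matrix multiply against the\n# row-flattened filters (see Conv.forward in deepnet/layers.py).\nimport numpy as np\nfrom deepnet.im2col import im2col_indices, col2im_indices\n\nN, C, H, W = 2, 3, 4, 4\nx = np.arange(N * C * H * W, dtype='float64').reshape(N, C, H, W)\n\n# 3x3 fields, stride 1, padding 1 -> a 4x4 output grid per image\ncols = im2col_indices(x, field_height=3, field_width=3, padding=1, stride=1)\nprint(cols.shape)  # (C*3*3, N*4*4) = (27, 32)\n\n# col2im_indices scatters columns back; overlapping positions accumulate,\n# which is exactly what the convolution backward pass needs\nx_back = col2im_indices(cols, x.shape, 3, 3, padding=1, stride=1)\nprint(x_back.shape)  # (2, 3, 4, 4)\n"
  },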
  {
    "path": "deepnet/layers.py",
    "content": "import numpy as np\nfrom deepnet.im2col import *\n\n\nclass Conv():\n\n    def __init__(self, X_dim, n_filter, h_filter, w_filter, stride, padding):\n\n        self.d_X, self.h_X, self.w_X = X_dim\n\n        self.n_filter, self.h_filter, self.w_filter = n_filter, h_filter, w_filter\n        self.stride, self.padding = stride, padding\n\n        self.W = np.random.randn(\n            n_filter, self.d_X, h_filter, w_filter) / np.sqrt(n_filter / 2.)\n        self.b = np.zeros((self.n_filter, 1))\n        self.params = [self.W, self.b]\n\n        self.h_out = (self.h_X - h_filter + 2 * padding) / stride + 1\n        self.w_out = (self.w_X - w_filter + 2 * padding) / stride + 1\n\n        if not self.h_out.is_integer() or not self.w_out.is_integer():\n            raise Exception(\"Invalid dimensions!\")\n\n        self.h_out, self.w_out = int(self.h_out), int(self.w_out)\n        self.out_dim = (self.n_filter, self.h_out, self.w_out)\n\n    def forward(self, X):\n\n        self.n_X = X.shape[0]\n\n        self.X_col = im2col_indices(\n            X, self.h_filter, self.w_filter, stride=self.stride, padding=self.padding)\n        W_row = self.W.reshape(self.n_filter, -1)\n\n        out = W_row @ self.X_col + self.b\n        out = out.reshape(self.n_filter, self.h_out, self.w_out, self.n_X)\n        out = out.transpose(3, 0, 1, 2)\n        return out\n\n    def backward(self, dout):\n\n        dout_flat = dout.transpose(1, 2, 3, 0).reshape(self.n_filter, -1)\n\n        dW = dout_flat @ self.X_col.T\n        dW = dW.reshape(self.W.shape)\n\n        db = np.sum(dout, axis=(0, 2, 3)).reshape(self.n_filter, -1)\n\n        W_flat = self.W.reshape(self.n_filter, -1)\n\n        dX_col = W_flat.T @ dout_flat\n        shape = (self.n_X, self.d_X, self.h_X, self.w_X)\n        dX = col2im_indices(dX_col, shape, self.h_filter,\n                            self.w_filter, self.padding, self.stride)\n\n        return dX, [dW, db]\n\n\nclass Maxpool():\n\n    def __init__(self, X_dim, size, stride):\n\n        self.d_X, self.h_X, self.w_X = X_dim\n\n        self.params = []\n\n        self.size = size\n        self.stride = stride\n\n        self.h_out = (self.h_X - size) / stride + 1\n        self.w_out = (self.w_X - size) / stride + 1\n\n        if not self.h_out.is_integer() or not self.w_out.is_integer():\n            raise Exception(\"Invalid dimensions!\")\n\n        self.h_out, self.w_out = int(self.h_out), int(self.w_out)\n        self.out_dim = (self.d_X, self.h_out, self.w_out)\n\n    def forward(self, X):\n        self.n_X = X.shape[0]\n        X_reshaped = X.reshape(\n            X.shape[0] * X.shape[1], 1, X.shape[2], X.shape[3])\n\n        self.X_col = im2col_indices(\n            X_reshaped, self.size, self.size, padding=0, stride=self.stride)\n\n        self.max_indexes = np.argmax(self.X_col, axis=0)\n        out = self.X_col[self.max_indexes, range(self.max_indexes.size)]\n\n        out = out.reshape(self.h_out, self.w_out, self.n_X,\n                          self.d_X).transpose(2, 3, 0, 1)\n        return out\n\n    def backward(self, dout):\n\n        dX_col = np.zeros_like(self.X_col)\n        # flatten the gradient\n        dout_flat = dout.transpose(2, 3, 0, 1).ravel()\n\n        dX_col[self.max_indexes, range(self.max_indexes.size)] = dout_flat\n\n        # get the original X_reshaped structure from col2im\n        shape = (self.n_X * self.d_X, 1, self.h_X, self.w_X)\n        dX = col2im_indices(dX_col, shape, self.size,\n                            self.size, padding=0, 
stride=self.stride)\n        dX = dX.reshape(self.n_X, self.d_X, self.h_X, self.w_X)\n        return dX, []\n\n\nclass Flatten():\n\n    def __init__(self):\n        self.params = []\n\n    def forward(self, X):\n        self.X_shape = X.shape\n        out = X.ravel().reshape(X.shape[0], -1)\n        return out\n\n    def backward(self, dout):\n        out = dout.reshape(self.X_shape)\n        return out, ()\n\n\nclass FullyConnected():\n\n    def __init__(self, in_size, out_size):\n\n        self.W = np.random.randn(in_size, out_size) / np.sqrt(in_size / 2.)\n        self.b = np.zeros((1, out_size))\n        self.params = [self.W, self.b]\n\n    def forward(self, X):\n        self.X = X\n        out = self.X @ self.W + self.b\n        return out\n\n    def backward(self, dout):\n        dW = self.X.T @ dout\n        db = np.sum(dout, axis=0)\n        dX = dout @ self.W.T\n        return dX, [dW, db]\n\n\nclass Batchnorm():\n\n    def __init__(self, X_dim):\n        self.d_X, self.h_X, self.w_X = X_dim\n        self.gamma = np.ones((1, int(np.prod(X_dim))))\n        self.beta = np.zeros((1, int(np.prod(X_dim))))\n        self.params = [self.gamma, self.beta]\n\n    def forward(self, X):\n        self.n_X = X.shape[0]\n        self.X_shape = X.shape\n\n        self.X_flat = X.ravel().reshape(self.n_X, -1)\n        self.mu = np.mean(self.X_flat, axis=0)\n        self.var = np.var(self.X_flat, axis=0)\n        self.X_norm = (self.X_flat - self.mu) / np.sqrt(self.var + 1e-8)\n        out = self.gamma * self.X_norm + self.beta\n\n        return out.reshape(self.X_shape)\n\n    def backward(self, dout):\n\n        dout = dout.ravel().reshape(dout.shape[0], -1)\n        X_mu = self.X_flat - self.mu\n        var_inv = 1. / np.sqrt(self.var + 1e-8)\n\n        dbeta = np.sum(dout, axis=0)\n        dgamma = np.sum(dout * self.X_norm, axis=0)\n\n        dX_norm = dout * self.gamma\n        dvar = np.sum(dX_norm * X_mu, axis=0) * - \\\n            0.5 * (self.var + 1e-8)**(-3 / 2)\n        dmu = np.sum(dX_norm * -var_inv, axis=0) + dvar * \\\n            1 / self.n_X * np.sum(-2. 
* X_mu, axis=0)\n        dX = (dX_norm * var_inv) + (dmu / self.n_X) + \\\n            (dvar * 2 / self.n_X * X_mu)\n\n        dX = dX.reshape(self.X_shape)\n        return dX, [dgamma, dbeta]\n\n\nclass Dropout():\n\n    def __init__(self, prob=0.5):\n        self.prob = prob\n        self.params = []\n\n    def forward(self, X):\n        # inverted dropout: prob is the keep probability\n        self.mask = np.random.binomial(1, self.prob, size=X.shape) / self.prob\n        out = X * self.mask\n        return out.reshape(X.shape)\n\n    def backward(self, dout):\n        dX = dout * self.mask\n        return dX, []\n\n\nclass ReLU():\n    def __init__(self):\n        self.params = []\n\n    def forward(self, X):\n        self.X = X\n        return np.maximum(0, X)\n\n    def backward(self, dout):\n        dX = dout.copy()\n        dX[self.X <= 0] = 0\n        return dX, []\n\n\nclass sigmoid():\n    def __init__(self):\n        self.params = []\n\n    def forward(self, X):\n        # sigmoid(x) = 1 / (1 + exp(-x)); note the negative sign\n        out = 1.0 / (1.0 + np.exp(-X))\n        self.out = out\n        return out\n\n    def backward(self, dout):\n        dX = dout * self.out * (1 - self.out)\n        return dX, []\n\n\nclass tanh():\n    def __init__(self):\n        self.params = []\n\n    def forward(self, X):\n        out = np.tanh(X)\n        self.out = out\n        return out\n\n    def backward(self, dout):\n        dX = dout * (1 - self.out**2)\n        return dX, []\n"
  },
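  {
    "path": "examples/conv_stack_shapes.py",
    "content": "# Added illustration, not an original repo file: how the layer\n# constructors chain together. Each layer exposes out_dim, so the next\n# layer's input dimensions can be derived from it.\nimport numpy as np\nfrom deepnet.layers import Conv, ReLU, Maxpool, Flatten, FullyConnected\n\nX_dim = (3, 32, 32)  # (channels, height, width)\nconv = Conv(X_dim, n_filter=8, h_filter=3, w_filter=3, stride=1, padding=1)\npool = Maxpool(conv.out_dim, size=2, stride=2)\nfc = FullyConnected(int(np.prod(pool.out_dim)), 10)\n\nX = np.random.randn(4, *X_dim)  # a batch of 4 images\nout = conv.forward(X)\nprint(out.shape)  # (4, 8, 32, 32): padding 1 preserves height and width\nout = ReLU().forward(out)\nout = pool.forward(out)\nprint(out.shape)  # (4, 8, 16, 16): 2x2 pooling halves the spatial dims\nout = fc.forward(Flatten().forward(out))\nprint(out.shape)  # (4, 10): one score per class\n"
  },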
  {
    "path": "deepnet/loss.py",
    "content": "import numpy as np\nfrom deepnet.utils import softmax\nfrom deepnet.layers import Conv, FullyConnected\n\n\ndef l2_regularization(layers, lam=0.001):\n    reg_loss = 0.0\n    for layer in layers:\n        if hasattr(layer, 'W'):\n            reg_loss += 0.5 * lam * np.sum(layer.W * layer.W)\n    return reg_loss\n\n\ndef delta_l2_regularization(layers, grads, lam=0.001):\n    for layer, grad in zip(layers, reversed(grads)):\n        if hasattr(layer, 'W'):\n            grad[0] += lam * layer.W\n    return grads\n\n\ndef l1_regularization(layers, lam=0.001):\n    reg_loss = 0.0\n    for layer in layers:\n        if hasattr(layer, 'W'):\n            reg_loss += lam * np.sum(np.abs(layer.W))\n    return reg_loss\n\n\ndef delta_l1_regularization(layers, grads, lam=0.001):\n    for layer, grad in zip(layers, reversed(grads)):\n        if hasattr(layer, 'W'):\n            grad[0] += lam * layer.W / (np.abs(layer.W) + 1e-8)\n    return grads\n\n\ndef SoftmaxLoss(X, y):\n    m = y.shape[0]\n    p = softmax(X)\n    log_likelihood = -np.log(p[range(m), y])\n    loss = np.sum(log_likelihood) / m\n\n    dx = p.copy()\n    dx[range(m), y] -= 1\n    dx /= m\n    return loss, dx\n"
  },
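  {
    "path": "examples/softmax_loss_demo.py",
    "content": "# Added illustration, not an original repo file: a worked example of\n# SoftmaxLoss. With near-uniform scores over 10 classes the loss should\n# sit near -log(1/10) = 2.302..., and the returned gradient equals\n# (softmax(X) - one_hot(y)) / batch_size.\nimport numpy as np\nfrom deepnet.loss import SoftmaxLoss\nfrom deepnet.utils import softmax\n\nnp.random.seed(0)\nX = 0.001 * np.random.randn(8, 10)  # 8 samples, 10 classes, tiny scores\ny = np.random.randint(10, size=8)\n\nloss, dx = SoftmaxLoss(X, y)\nprint('loss:', loss)  # close to 2.302 = -log(0.1)\n\n# verify the closed-form gradient directly\np = softmax(X)\np[range(8), y] -= 1\nprint('max gradient diff:', np.abs(dx - p / 8).max())  # ~0.0\n"
  },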
  {
    "path": "deepnet/nnet.py",
    "content": "import numpy as np\nfrom deepnet.loss import SoftmaxLoss, l2_regularization, delta_l2_regularization\nfrom deepnet.utils import accuracy, softmax\nfrom deepnet.utils import one_hot_encode\n\nclass CNN:\n\n    def __init__(self, layers, loss_func=SoftmaxLoss):\n        self.layers = layers\n        self.params = []\n        for layer in self.layers:\n            self.params.append(layer.params)\n        self.loss_func = loss_func\n\n    def forward(self, X):\n        for layer in self.layers:\n            X = layer.forward(X)\n        return X\n\n    def backward(self, dout):\n        grads = []\n        for layer in reversed(self.layers):\n            dout, grad = layer.backward(dout)\n            grads.append(grad)\n        return grads\n\n    def train_step(self, X, y):\n        out = self.forward(X)\n        loss, dout = self.loss_func(out, y)\n        loss += l2_regularization(self.layers)\n        grads = self.backward(dout)\n        grads = delta_l2_regularization(self.layers, grads)\n        return loss, grads\n\n    def predict(self, X):\n        X = self.forward(X)\n        return np.argmax(softmax(X), axis=1)\n\n\nclass RNN:\n\n    def __init__(self, vocab_size, h_size, char_to_idx, idx_to_char):\n        self.vocab_size = vocab_size\n        self.h_size = h_size\n        self.char_to_idx = char_to_idx\n        self.idx_to_char = idx_to_char\n        self.model = dict(\n            Wxh=np.random.rand(vocab_size, h_size) / np.sqrt(vocab_size / 2),\n            Whh=np.random.rand(h_size, h_size) / np.sqrt(h_size / 2),\n            Why=np.random.rand(h_size, vocab_size) / np.sqrt(h_size / 2),\n            bh=np.zeros((1, vocab_size)),\n            by=np.zeros((1, h_size))\n        )\n        self.initial_state = np.zeros((1, self.h_size))\n\n    def _forward(self, X, h):\n        # input to one hot\n        X_onehot = np.zeros(self.vocab_size)\n        X_onehot[X] = 1\n        X_onehot = X_onehot.reshape(1,-1)\n\n        h_prev = h.copy()\n        # calculate hidden step with tanh\n        h = np.tanh(np.dot(X,self.model['Wxh']) + np.dot(h_prev,self.model['Whh']) + self.model['bh'])\n\n        # fully connected forward step\n        y = np.dot(X, self.model['Why']) + self.model['by']\n\n        cache = (X_onehot, h_prev)\n        return y, h, cache\n\n    def _backward(self, out, y, dh_next, cache):\n\n        X_onehot, h_prev = cache\n\n        # gradient of output from froward step\n        dout = softmax(out)\n        dout[range(len(y)), y] -= 1\n        # fully connected backward step\n        dWhy = X_onehot.T @ dout\n        dby = np.sum(dWhy, axis=0).reshape(1, -1)\n        dh = dout @ self.dWhy.T\n        # gradient through tanh\n        dh = dout * (1 - out**2)\n        # add up gradient from previous gradient\n        dh += dh_next\n        # hidden state\n        dbh = dh\n        dWhh = h_prev.T @ dh\n        dWxh = X_onehot.T @ dh\n        dh_next = dh @ Whh.T\n\n        grads = dict(Wxh=dWxh, Whh=dWhh, Why=dWhy, bh=dbh, by=dby)\n\n        return grads, dh_next\n\n    def train_step(self,X_train, y_train, h):\n        ys, caches = [], []\n        total_loss = 0\n        grads = {k: np.zeros_like(v) for k, v in self.model.items()}\n\n        # forward pass and store values for bptt\n        for x, y in zip(X_train, y_train):\n            y_pred, h, cache = self._forward(x, h)\n            p = softmax(y_pred)\n            log_likelihood = -np.log(p[range(y_pred.shape[0]), y])\n            total_loss += np.sum(log_likelihood) / y_pred.shape[0]\n            
ys.append(y_pred)\n            caches.append(cache)\n\n        total_loss /= X_train.shape[0]\n\n        # backprop through time\n        dh_next = np.zeros((1, self.h_size))\n        for t in reversed(range(len(X_train))):\n            grad, dh_next = self._backward(\n                ys[t], y_train[t], dh_next, caches[t])\n            # sum up the gradients for each time step\n            for k in grads.keys():\n                grads[k] += grad[k]\n\n        # clip vanishing/exploding gradients\n        for k, v in grads.items():\n            grads[k] = np.clip(v, -5.0, 5.0)\n\n        return loss, grads, h\n\n    def predict(self, X):\n        X = self.forward(X)\n        return np.argmax(softmax(X), axis=1)\n"
  },
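  {
    "path": "examples/train_step_demo.py",
    "content": "# Added illustration, not an original repo file: one CNN training step\n# on random data, i.e. what the solver loop does per minibatch: forward\n# pass, softmax loss plus L2 penalty, backprop, then a parameter update.\nimport numpy as np\nfrom deepnet.layers import Conv, ReLU, Maxpool, Flatten, FullyConnected\nfrom deepnet.nnet import CNN\nfrom deepnet.solver import vanilla_update\n\nnp.random.seed(0)\nX = np.random.randn(4, 1, 8, 8)   # 4 fake 8x8 grayscale images\ny = np.random.randint(3, size=4)  # 3 classes\n\nconv = Conv((1, 8, 8), n_filter=4, h_filter=3, w_filter=3, stride=1, padding=1)\npool = Maxpool(conv.out_dim, size=2, stride=2)\nlayers = [conv, ReLU(), pool, Flatten(),\n          FullyConnected(int(np.prod(pool.out_dim)), 3)]\ncnn = CNN(layers)\n\nloss, grads = cnn.train_step(X, y)  # grads come back in reverse layer order\nvanilla_update(cnn.params, grads, learning_rate=0.01)\nprint('loss:', loss)\nprint('predictions:', cnn.predict(X))\n"
  },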
  {
    "path": "deepnet/solver.py",
    "content": "import numpy as np\nfrom sklearn.utils import shuffle\nfrom deepnet.utils import accuracy\nimport copy\nfrom deepnet.loss import SoftmaxLoss\n\n\ndef get_minibatches(X, y, minibatch_size,shuffleTag=True):\n    m = X.shape[0]\n    minibatches = []\n    if shuffleTag:\n        X, y = shuffle(X, y)\n    for i in range(0, m, minibatch_size):\n        X_batch = X[i:i + minibatch_size, :, :, :]\n        y_batch = y[i:i + minibatch_size, ]\n        minibatches.append((X_batch, y_batch))\n    return minibatches\n\n\ndef vanilla_update(params, grads, learning_rate=0.01):\n    for param, grad in zip(params, reversed(grads)):\n        for i in range(len(grad)):\n            param[i] += - learning_rate * grad[i]\n\n\ndef momentum_update(velocity, params, grads, learning_rate=0.01, mu=0.9):\n    for v, param, grad, in zip(velocity, params, reversed(grads)):\n        for i in range(len(grad)):\n            v[i] = mu * v[i] + learning_rate * grad[i]\n            param[i] -= v[i]\n\n\ndef adagrad_update(cache, params, grads, learning_rate=0.01):\n    for c, param, grad, in zip(cache, params, reversed(grads)):\n        for i in range(len(grad)):\n            cache[i] += grad[i]**2\n            param[i] += - learning_rate * grad[i] / (np.sqrt(cache[i]) + 1e-8)\n\n\ndef rmsprop_update(cache, params, grads, learning_rate=0.01, decay_rate=0.9):\n    for c, param, grad, in zip(cache, params, reversed(grads)):\n        for i in range(len(grad)):\n            cache[i] = decay_rate * cache[i] + (1 - decay_rate) * grad[i]**2\n            param[i] += - learning_rate * grad[i] / (np.sqrt(cache[i]) + 1e-4)\n\n\ndef sgd(nnet, X_train, y_train, minibatch_size, epoch, learning_rate, verbose=True,\n        X_test=None, y_test=None):\n    minibatches = get_minibatches(X_train, y_train, minibatch_size)\n    for i in range(epoch):\n        loss = 0\n        if verbose:\n            print(\"Epoch {0}\".format(i + 1))\n        for X_mini, y_mini in minibatches:\n            loss, grads = nnet.train_step(X_mini, y_mini)\n            vanilla_update(nnet.params, grads, learning_rate=learning_rate)\n        if verbose:\n            train_acc = accuracy(y_train, nnet.predict(X_train))\n            test_acc = accuracy(y_test, nnet.predict(X_test))\n            print(\"Loss = {0} | Training Accuracy = {1} | Test Accuracy = {2}\".format(\n                loss, train_acc, test_acc))\n    return nnet\n\ndef sgd_rnn(nnet, X_train, y_train, minibatch_size, epoch, learning_rate, verbose=True):\n    for i in range(epoch):\n        loss = 0\n        if verbose:\n            print(\"Epoch {0}\".format(i + 1))\n        hidden_state = nnet.initial_state\n        loss, grads, hidden_state = nnet.train_step(X_train, y_train, hidden_state)\n\n        for k in grads.keys():\n            nnet.model[k] -= learning_rate * grads[k]\n        \n        if verbose:\n            print(\"Loss = {0}\".format(loss))\n    return nnet\n\n\ndef sgd_momentum(nnet, X_train, y_train, minibatch_size, epoch, learning_rate, mu=0.9,\n                 verbose=True, X_test=None, y_test=None, nesterov=True):\n\n    minibatches = get_minibatches(X_train, y_train, minibatch_size)\n\n    for i in range(epoch):\n        loss = 0\n        velocity = []\n        for param_layer in nnet.params:\n            p = [np.zeros_like(param) for param in list(param_layer)]\n            velocity.append(p)\n\n        if verbose:\n            print(\"Epoch {0}\".format(i + 1))\n\n        for X_mini, y_mini in minibatches:\n\n            if nesterov:\n                for 
param, ve in zip(nnet.params, velocity):\n                    for i in range(len(param)):\n                        param[i] += mu * ve[i]\n\n            loss, grads = nnet.train_step(X_mini, y_mini)\n            momentum_update(velocity, nnet.params, grads,\n                            learning_rate=learning_rate, mu=mu)\n\n        if verbose:\n            m_train = X_train.shape[0]\n            m_test = X_test.shape[0]\n            y_train_pred = np.array([], dtype=\"int64\")\n            y_test_pred = np.array([], dtype=\"int64\")\n            for i in range(0, m_train, minibatch_size):\n                X_tr = X_train[i:i + minibatch_size, :, :, :]\n                y_tr = y_train[i:i + minibatch_size, ]\n                y_train_pred = np.append(y_train_pred, nnet.predict(X_tr))\n            for i in range(0, m_test, minibatch_size):\n                X_te = X_test[i:i + minibatch_size, :, :, :]\n                y_te = y_test[i:i + minibatch_size, ]\n                y_test_pred = np.append(y_test_pred, nnet.predict(X_te))\n\n            train_acc = accuracy(y_train, y_train_pred)\n            test_acc = accuracy(y_test, y_test_pred)\n            print(\"Loss = {0} | Training Accuracy = {1} | Test Accuracy = {2}\".format(\n                loss, train_acc, test_acc))\n    return nnet\n\n\ndef adam(nnet, X_train, y_train, minibatch_size, epoch, learning_rate, verbose=True,\n         X_test=None, y_test=None):\n    beta1 = 0.9\n    beta2 = 0.999\n    minibatches = get_minibatches(X_train, y_train, minibatch_size)\n    for i in range(epoch):\n        loss = 0\n        velocity, cache = [], []\n        for param_layer in nnet.params:\n            p = [np.zeros_like(param) for param in list(param_layer)]\n            velocity.append(p)\n            cache.append(p)\n        if verbose:\n            print(\"Epoch {0}\".format(i + 1))\n        t = 1\n        for X_mini, y_mini in minibatches:\n            loss, grads = nnet.train_step(X_mini, y_mini)\n            for c, v, param, grad, in zip(cache, velocity, nnet.params, reversed(grads)):\n                for i in range(len(grad)):\n                    c[i] = beta1 * c[i] + (1. - beta1) * grad[i]\n                    v[i] = beta2 * v[i] + (1. - beta2) * (grad[i]**2)\n                    mt = c[i] / (1. - beta1**(t))\n                    vt = v[i] / (1. - beta2**(t))\n                    param[i] += - learning_rate * mt / (np.sqrt(vt) + 1e-4)\n            t += 1\n\n        if verbose:\n            train_acc = accuracy(y_train, nnet.predict(X_train))\n            test_acc = accuracy(y_test, nnet.predict(X_test))\n            print(\"Loss = {0} | Training Accuracy = {1} | Test Accuracy = {2}\".format(\n                loss, train_acc, test_acc))\n    return nnet\n"
  },
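  {
    "path": "examples/update_rule_demo.py",
    "content": "# Added illustration, not an original repo file: the data layout the\n# update rules expect. params is a list of per-layer parameter lists, and\n# grads (as produced by CNN.backward) arrives in reverse layer order,\n# which is why every update rule zips params against reversed(grads).\nimport numpy as np\nfrom deepnet.solver import vanilla_update\n\nW, b = np.ones((2, 2)), np.zeros(2)\nparams = [[W, b]]  # a single layer with two parameters\ngrads = [[np.full((2, 2), 0.5), np.full(2, 0.5)]]  # one layer, so already 'reversed'\n\nvanilla_update(params, grads, learning_rate=0.1)\nprint(W)  # each entry decreased by 0.1 * 0.5 = 0.05 -> 0.95\nprint(b)  # -0.05\n"
  },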
  {
    "path": "deepnet/utils.py",
    "content": "import numpy as np\nimport _pickle as cPickle\nimport gzip\nimport os\n\n\ndef one_hot_encode(y, num_class):\n    m = y.shape[0]\n    onehot = np.zeros((m, num_class), dtype=\"int32\")\n    for i in range(m):\n        idx = y[i]\n        onehot[i][idx] = 1\n    return onehot\n\n\ndef accuracy(y_true, y_pred):\n    return np.mean(y_pred == y_true)  # both are not one hot encoded\n\n\ndef softmax(x):\n    exp_x = np.exp(x - np.max(x, axis=1, keepdims=True))\n    return exp_x / np.sum(exp_x, axis=1, keepdims=True)\n\n\ndef load_mnist(path, num_training=50000, num_test=10000, cnn=True, one_hot=False):\n    f = gzip.open(path, 'rb')\n    training_data, validation_data, test_data = cPickle.load(\n        f, encoding='iso-8859-1')\n    f.close()\n    X_train, y_train = training_data\n    X_validation, y_validation = validation_data\n    X_test, y_test = test_data\n    if cnn:\n        shape = (-1, 1, 28, 28)\n        X_train = X_train.reshape(shape)\n        X_validation = X_validation.reshape(shape)\n        X_test = X_test.reshape(shape)\n    if one_hot:\n        y_train = one_hot_encode(y_train, 10)\n        y_validation = one_hot_encode(y_validation, 10)\n        y_test = one_hot_encode(y_test, 10)\n    X_train, y_train = X_train[range(\n        num_training)], y_train[range(num_training)]\n    X_test, y_test = X_test[range(num_test)], y_test[range(num_test)]\n    return (X_train, y_train), (X_test, y_test)\n\n\ndef load_cifar10(path, num_training=1000, num_test=1000):\n    Xs, ys = [], []\n    for batch in range(1, 6):\n        f = open(os.path.join(path, \"data_batch_{0}\".format(batch)), 'rb')\n        data = cPickle.load(f, encoding='iso-8859-1')\n        f.close()\n        X = data[\"data\"].reshape(10000, 3, 32, 32).astype(\"float64\")\n        y = np.array(data[\"labels\"])\n        Xs.append(X)\n        ys.append(y)\n    f = open(os.path.join(CIFAR10_PATH, \"test_batch\"), 'rb')\n    data = cPickle.load(f, encoding='iso-8859-1')\n    f.close()\n    X_train, y_train = np.concatenate(Xs), np.concatenate(ys)\n    X_test = data[\"data\"].reshape(10000, 3, 32, 32).astype(\"float\")\n    y_test = np.array(data[\"labels\"])\n    X_train, y_train = X_train[range(\n        num_training)], y_train[range(num_training)]\n    X_test, y_test = X_test[range(num_test)], y_test[range(num_test)]\n    mean = np.mean(X_train, axis=0)\n    std = np.std(X_train)\n    X_train /= 255.0\n    X_test /= 255.0\n    return (X_train, y_train), (X_test, y_test)\n"
  },
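  {
    "path": "examples/utils_demo.py",
    "content": "# Added illustration, not an original repo file: the small helpers in\n# deepnet/utils.py. softmax subtracts the row max before exponentiating,\n# so even huge scores do not overflow.\nimport numpy as np\nfrom deepnet.utils import softmax, one_hot_encode, accuracy\n\nscores = np.array([[1.0, 2.0, 3.0],\n                   [1000.0, 1000.0, 1000.0]])  # naive exp would overflow here\np = softmax(scores)\nprint(p.sum(axis=1))  # rows sum to 1, no overflow warnings\n\ny = np.array([2, 0])\nprint(one_hot_encode(y, 3))           # [[0 0 1], [1 0 0]]\nprint(accuracy(y, p.argmax(axis=1)))  # fraction of correct predictions\n"
  },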
  {
    "path": "requirements.txt",
    "content": "numpy==1.11.3\nscipy==0.16.1\nmatplotlib==1.5.0\nipykernel==4.2.2\nipython==4.0.1\nipython-genutils==0.1.0\nipywidgets==4.1.1"
  },
  {
    "path": "run_cnn.py",
    "content": "import numpy as np\nfrom deepnet.utils import load_mnist, load_cifar10\nfrom deepnet.layers import *\nfrom deepnet.solver import sgd, sgd_momentum, adam\nfrom deepnet.nnet import CNN\nimport sys\n\n\ndef make_mnist_cnn(X_dim, num_class):\n    conv = Conv(X_dim, n_filter=32, h_filter=3,\n                w_filter=3, stride=1, padding=1)\n    relu_conv = ReLU()\n    maxpool = Maxpool(conv.out_dim, size=2, stride=1)\n    flat = Flatten()\n    fc = FullyConnected(np.prod(maxpool.out_dim), num_class)\n    return [conv, relu_conv, maxpool, flat, fc]\n\n\ndef make_cifar10_cnn(X_dim, num_class):\n    conv = Conv(X_dim, n_filter=16, h_filter=5,\n                w_filter=5, stride=1, padding=2)\n    relu = ReLU()\n    maxpool = Maxpool(conv.out_dim, size=2, stride=2)\n    conv2 = Conv(maxpool.out_dim, n_filter=20, h_filter=5,\n                 w_filter=5, stride=1, padding=2)\n    relu2 = ReLU()\n    maxpool2 = Maxpool(conv2.out_dim, size=2, stride=2)\n    flat = Flatten()\n    fc = FullyConnected(np.prod(maxpool2.out_dim), num_class)\n    return [conv, relu, maxpool, conv2, relu2, maxpool2, flat, fc]\n\n\nif __name__ == \"__main__\":\n\n    if sys.argv[1] == \"mnist\":\n\n        training_set, test_set = load_mnist(\n            'data/mnist.pkl.gz', num_training=1000, num_test=1000)\n        X, y = training_set\n        X_test, y_test = test_set\n        mnist_dims = (1, 28, 28)\n        cnn = CNN(make_mnist_cnn(mnist_dims, num_class=10))\n        cnn = sgd_momentum(cnn, X, y, minibatch_size=35, epoch=20,\n                           learning_rate=0.01, X_test=X_test, y_test=y_test)\n\n    if sys.argv[1] == \"cifar10\":\n        training_set, test_set = load_cifar10(\n            'data/cifar-10', num_training=1000, num_test=100)\n        X, y = training_set\n        X_test, y_test = test_set\n        cifar10_dims = (3, 32, 32)\n        cnn = CNN(make_cifar10_cnn(cifar10_dims, num_class=10))\n        cnn = sgd_momentum(cnn, X, y, minibatch_size=10, epoch=200,\n                           learning_rate=0.01, X_test=X_test, y_test=y_test)\n"
  },
  {
    "path": "run_rnn.py",
    "content": "import numpy as np\nfrom deepnet.nnet import RNN\nfrom deepnet.solver import sgd_rnn\n\n\ndef text_to_inputs(path):\n    \"\"\"\n    Converts the given text into X and y vectors\n    X : contains the index of all the characters in the text vocab\n    y : y[i] contains the index of next character for X[i] in the text vocab\n    \"\"\"\n    with open(path) as f:\n        txt = f.read()\n        X, y = [], []\n\n        char_to_idx = {char: i for i, char in enumerate(set(txt))}\n        idx_to_char = {i: char for i, char in enumerate(set(txt))}\n        X = np.array([char_to_idx[i] for i in txt])\n        y = [char_to_idx[i] for i in txt[1:]]\n        y.append(char_to_idx['.'])\n        y = np.array(y)\n\n        vocab_size = len(char_to_idx)\n        return X, y, vocab_size, char_to_idx, idx_to_char\n\n\nif __name__ == \"__main__\":\n\n    X, y, vocab_size, char_to_idx, idx_to_char = text_to_inputs('data/Rnn.txt')\n    rnn = RNN(vocab_size,vocab_size,char_to_idx,idx_to_char)\n    rnn = sgd_rnn(rnn,X,y,10,10,0.1)\n\n\n\n    \n"
  }
]