[
  {
    "path": ".gitignore",
    "content": "# Virtual Env\nenv/\n\n# PyCharm files\n.idea/\n\n# Byte-compiled files\n*.pyc\n\n"
  },
  {
    "path": "LICENSE.txt",
    "content": "MIT License\n\nCopyright (c) 2016 Ankesh Anand\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "**Note**: This repository is no longer maintained. \r\n\r\n# Adversarial Neural Cryptography in [TensorFlow](https://github.com/tensorflow/tensorflow)\r\n\r\nA TensorFlow implementation of Google Brain's recent paper ([Learning to Protect Communications with Adversarial Neural Cryptography.](https://arxiv.org/pdf/1610.06918v1.pdf))\r\n\r\nTwo Neural Networks, Alice and Bob learn to communicate secretly with each other, in presence of an adversary Eve.\r\n\r\n![Setup](assets/diagram.png)\r\n\r\n## Pre-requisites\r\n\r\n* TensorFlow \r\n* Seaborn (for plots)\r\n* Numpy\r\n\r\n## Usage \r\nFirst, ensure you have the dependencies installed.\r\n\r\n    $ pip install -r requirements.txt\r\n\r\nTo train the neural networks, run the `main.py` script.\r\n\r\n    $ python main.py --msg-len 32 --epochs 50\r\n    \r\n    \r\n## Attribution / Thanks\r\n\r\n* carpedm20's DCGAN [implementation](https://github.com/carpedm20/DCGAN-tensorflow) in TensorFlow. \r\n* Liam's [implementation](https://github.com/nlml/adversarial-neural-crypt) of Adversarial Neural Cryptography in Theano. \r\n\r\n## Citing Code\r\nIf you want to cite this code in your work, refer to the following DOI:\r\n\r\n[![DOI](https://zenodo.org/badge/73807045.svg)](https://zenodo.org/badge/latestdoi/73807045)\r\n\r\n## License\r\n\r\nMIT\r\n"
  },
  {
    "path": "main.py",
    "content": "import tensorflow as tf\n\nfrom argparse import ArgumentParser\nfrom src.model import CryptoNet\nfrom src.config import *\n\n\ndef build_parser():\n    parser = ArgumentParser()\n\n    parser.add_argument('--msg-len', type=int,\n                        dest='msg_len', help='message length',\n                        metavar='MSG_LEN', default=MSG_LEN)\n\n    parser.add_argument('--learning-rate', type=float,\n                        dest='learning_rate',\n                        help='learning rate (default %(default)s)',\n                        metavar='LEARNING_RATE', default=LEARNING_RATE)\n\n    parser.add_argument('--epochs', type=int,\n                        dest='epochs', help='Number of Epochs in Adversarial Training',\n                        metavar='EPOCHS', default=NUM_EPOCHS)\n\n    parser.add_argument('--batch-size', type=int,\n                        dest='batch_size', help='batch size',\n                        metavar='BATCH_SIZE', default=BATCH_SIZE)\n\n    return parser\n\n\ndef main():\n    parser = build_parser()\n    options = parser.parse_args()\n\n    with tf.Session() as sess:\n        crypto_net = CryptoNet(sess, msg_len=options.msg_len, epochs=options.epochs,\n                               batch_size=options.batch_size, learning_rate=options.learning_rate)\n\n        crypto_net.train()\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "requirements.txt",
    "content": "cycler==0.10.0\nfuncsigs==1.0.2\nmatplotlib==2.1.0\nmock==2.0.0\nnumpy==1.13.3\npandas==0.21.0\npbr==3.1.1\nprotobuf==3.5.0.post1\npyparsing==2.1.10\npython-dateutil==2.6.0\npytz==2017.3\nscipy==1.0.0\nseaborn==0.8.1\nsix==1.11.0\ntensorflow==1.4.0\n"
  },
  {
    "path": "src/__init__.py",
    "content": ""
  },
  {
    "path": "src/config.py",
    "content": "FILTERS = [\n    [4, 1, 2],\n    [2, 2, 4],\n    [1, 4, 4],\n    [1, 4, 1]\n]\n\nMSG_LEN = 16\nKEY_LEN = 16\nBATCH_SIZE = 512\nNUM_EPOCHS = 60\nLEARNING_RATE = 0.0008\n"
  },
  {
    "path": "src/layers.py",
    "content": "import tensorflow as tf\nfrom config import FILTERS\n\n\ndef conv1d(input_, filter_shape, stride, name=\"conv1d\"):\n    with tf.variable_scope(name):\n        w = tf.get_variable('w', shape=filter_shape,\n                            initializer=tf.contrib.layers.xavier_initializer())\n        conv = tf.nn.conv1d(input_, w, stride, padding='SAME')\n\n        return conv\n\n\ndef conv_layer(hidden_layer_output, name):\n    h0 = tf.nn.relu(conv1d(hidden_layer_output, FILTERS[0], stride=1, name=name+'_h0_conv'))\n    h1 = tf.nn.relu(conv1d(h0, FILTERS[1], stride=2, name=name+'_h1_conv'))\n    h2 = tf.nn.relu(conv1d(h1, FILTERS[2], stride=1, name=name+'_h2_conv'))\n    h3 = tf.nn.tanh(conv1d(h2, FILTERS[3], stride=1, name=name+'_h3_conv'))\n\n    return h3\n"
  },
  {
    "path": "src/model.py",
    "content": "import tensorflow as tf\nimport numpy as np\n\nimport matplotlib\n# OSX fix\nmatplotlib.use('TkAgg')\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom layers import conv_layer\nfrom config import *\nfrom utils import init_weights, gen_data\n\n\nclass CryptoNet(object):\n    def __init__(self, sess, msg_len=MSG_LEN, batch_size=BATCH_SIZE,\n                 epochs=NUM_EPOCHS, learning_rate=LEARNING_RATE):\n        \"\"\"\n        Args:\n            sess: TensorFlow session\n            msg_len: The length of the input message to encrypt.\n            key_len: Length of Alice and Bob's private key.\n            batch_size: Minibatch size for each adversarial training\n            epochs: Number of epochs in the adversarial training\n            learning_rate: Learning Rate for Adam Optimizer\n        \"\"\"\n\n        self.sess = sess\n        self.msg_len = msg_len\n        self.key_len = self.msg_len\n        self.N = self.msg_len\n        self.batch_size = batch_size\n        self.epochs = epochs\n        self.learning_rate = learning_rate\n\n        self.build_model()\n\n    def build_model(self):\n        # Weights for fully connected layers\n        self.w_alice = init_weights(\"alice_w\", [2 * self.N, 2 * self.N])\n        self.w_bob = init_weights(\"bob_w\", [2 * self.N, 2 * self.N])\n        self.w_eve1 = init_weights(\"eve_w1\", [self.N, 2 * self.N])\n        self.w_eve2 = init_weights(\"eve_w2\", [2 * self.N, 2 * self.N])\n\n        # Placeholder variables for Message and Key\n        self.msg = tf.placeholder(\"float\", [None, self.msg_len])\n        self.key = tf.placeholder(\"float\", [None, self.key_len])\n\n        # Alice's network\n        # FC layer -> Conv Layer (4 1-D convolutions)\n        self.alice_input = tf.concat([self.msg, self.key],1)\n        self.alice_hidden = tf.nn.sigmoid(tf.matmul(self.alice_input, self.w_alice))\n        self.alice_hidden = tf.expand_dims(self.alice_hidden, 2)\n        self.alice_output = tf.squeeze(conv_layer(self.alice_hidden, \"alice\"))\n\n        # Bob's network\n        # FC layer -> Conv Layer (4 1-D convolutions)\n        self.bob_input = tf.concat([self.alice_output, self.key],1)\n        self.bob_hidden = tf.nn.sigmoid(tf.matmul(self.bob_input, self.w_bob))\n        self.bob_hidden = tf.expand_dims(self.bob_hidden, 2)\n        self.bob_output = tf.squeeze(conv_layer(self.bob_hidden, \"bob\"))\n\n        # Eve's network\n        # FC layer -> FC layer -> Conv Layer (4 1-D convolutions)\n        self.eve_input = self.alice_output\n        self.eve_hidden1 = tf.nn.sigmoid(tf.matmul(self.eve_input, self.w_eve1))\n        self.eve_hidden2 = tf.nn.sigmoid(tf.matmul(self.eve_hidden1, self.w_eve2))\n        self.eve_hidden2 = tf.expand_dims(self.eve_hidden2, 2)\n        self.eve_output = tf.squeeze(conv_layer(self.eve_hidden2, \"eve\"))\n\n    def train(self):\n        # Loss Functions\n        self.decrypt_err_eve = tf.reduce_mean(tf.abs(self.msg - self.eve_output))\n        self.decrypt_err_bob = tf.reduce_mean(tf.abs(self.msg - self.bob_output))\n        self.loss_bob = self.decrypt_err_bob + (1. - self.decrypt_err_eve) ** 2.\n\n        # Get training variables corresponding to each network\n        self.t_vars = tf.trainable_variables()\n        self.alice_or_bob_vars = [var for var in self.t_vars if 'alice_' in var.name or 'bob_' in var.name]\n        self.eve_vars = [var for var in self.t_vars if 'eve_' in var.name]\n\n        # Build the optimizers\n        self.bob_optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(\n            self.loss_bob, var_list=self.alice_or_bob_vars)\n        self.eve_optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(\n            self.decrypt_err_eve, var_list=self.eve_vars)\n\n        self.bob_errors, self.eve_errors = [], []\n\n        # Begin Training\n        tf.global_variables_initializer().run()\n        for i in range(self.epochs):\n            iterations = 2000\n\n            print 'Training Alice and Bob, Epoch:', i + 1\n            bob_loss, _ = self._train('bob', iterations)\n            self.bob_errors.append(bob_loss)\n\n            print 'Training Eve, Epoch:', i + 1\n            _, eve_loss = self._train('eve', iterations)\n            self.eve_errors.append(eve_loss)\n\n        self.plot_errors()\n\n    def _train(self, network, iterations):\n        bob_decrypt_error, eve_decrypt_error = 1., 1.\n\n        bs = self.batch_size\n        # Train Eve for two minibatches to give it a slight computational edge\n        if network == 'eve':\n            bs *= 2\n\n        for i in range(iterations):\n            msg_in_val, key_val = gen_data(n=bs, msg_len=self.msg_len, key_len=self.key_len)\n\n            if network == 'bob':\n                _, decrypt_err = self.sess.run([self.bob_optimizer, self.decrypt_err_bob],\n                                               feed_dict={self.msg: msg_in_val, self.key: key_val})\n                bob_decrypt_error = min(bob_decrypt_error, decrypt_err)\n\n            elif network == 'eve':\n                _, decrypt_err = self.sess.run([self.eve_optimizer, self.decrypt_err_eve],\n                                               feed_dict={self.msg: msg_in_val, self.key: key_val})\n                eve_decrypt_error = min(eve_decrypt_error, decrypt_err)\n\n        return bob_decrypt_error, eve_decrypt_error\n\n    def plot_errors(self):\n        \"\"\"\n        Plot Lowest Decryption Errors achieved by Bob and Eve per epoch\n        \"\"\"\n        sns.set_style(\"darkgrid\")\n        plt.plot(self.bob_errors)\n        plt.plot(self.eve_errors)\n        plt.legend(['bob', 'eve'])\n        plt.xlabel('Epoch')\n        plt.ylabel('Lowest Decryption error achieved')\n        plt.show()\n\n"
  },
  {
    "path": "src/utils.py",
    "content": "import numpy as np\nimport tensorflow as tf\nfrom config import *\n\n\n# Function to generate n random messages and keys\ndef gen_data(n=BATCH_SIZE, msg_len=MSG_LEN, key_len=KEY_LEN):\n    return (np.random.randint(0, 2, size=(n, msg_len))*2-1), \\\n           (np.random.randint(0, 2, size=(n, key_len))*2-1)\n\n\n# Xavier Glorot initialization of weights\ndef init_weights(name, shape):\n    return tf.get_variable(name, shape=shape,\n                           initializer=tf.contrib.layers.xavier_initializer())"
  }
]