Repository: ankeshanand/neural-cryptography-tensorflow
Branch: master
Commit: 8691f580ff07
Files: 10
Total size: 10.7 KB
Directory structure:
gitextract_iprf4wtj/
├── .gitignore
├── LICENSE.txt
├── README.md
├── main.py
├── requirements.txt
└── src/
├── __init__.py
├── config.py
├── layers.py
├── model.py
└── utils.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
# Virtual Env
env/
# PyCharm files
.idea/
# Byte-compiled files
*.pyc
================================================
FILE: LICENSE.txt
================================================
MIT License
Copyright (c) 2016 Ankesh Anand
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
**Note**: This repository is no longer maintained.
# Adversarial Neural Cryptography in [TensorFlow](https://github.com/tensorflow/tensorflow)
A TensorFlow implementation of Google Brain's recent paper ([Learning to Protect Communications with Adversarial Neural Cryptography.](https://arxiv.org/pdf/1610.06918v1.pdf))
Two Neural Networks, Alice and Bob learn to communicate secretly with each other, in presence of an adversary Eve.

## Pre-requisites
* TensorFlow
* Seaborn (for plots)
* Numpy
## Usage
First, ensure you have the dependencies installed.
$ pip install -r requirements.txt
To train the neural networks, run the `main.py` script.
$ python main.py --msg-len 32 --epochs 50
## Attribution / Thanks
* carpedm20's DCGAN [implementation](https://github.com/carpedm20/DCGAN-tensorflow) in TensorFlow.
* Liam's [implementation](https://github.com/nlml/adversarial-neural-crypt) of Adversarial Neural Cryptography in Theano.
## Citing Code
If you want to cite this code in your work, refer to the following DOI:
[![DOI](https://zenodo.org/badge/73807045.svg)](https://zenodo.org/badge/latestdoi/73807045)
## License
MIT
================================================
FILE: main.py
================================================
import tensorflow as tf
from argparse import ArgumentParser
from src.model import CryptoNet
from src.config import *
def build_parser():
    """Build the command-line parser for the training script.

    Defaults for every option come from ``src.config``.
    """
    specs = [
        ('--msg-len', int, 'msg_len', 'message length',
         'MSG_LEN', MSG_LEN),
        ('--learning-rate', float, 'learning_rate',
         'learning rate (default %(default)s)',
         'LEARNING_RATE', LEARNING_RATE),
        ('--epochs', int, 'epochs',
         'Number of Epochs in Adversarial Training',
         'EPOCHS', NUM_EPOCHS),
        ('--batch-size', int, 'batch_size', 'batch size',
         'BATCH_SIZE', BATCH_SIZE),
    ]
    parser = ArgumentParser()
    for flag, value_type, dest, help_text, metavar, default in specs:
        parser.add_argument(flag, type=value_type, dest=dest,
                            help=help_text, metavar=metavar,
                            default=default)
    return parser
def main():
    """Parse CLI options and run adversarial training in a TF session."""
    options = build_parser().parse_args()
    with tf.Session() as sess:
        net = CryptoNet(sess,
                        msg_len=options.msg_len,
                        epochs=options.epochs,
                        batch_size=options.batch_size,
                        learning_rate=options.learning_rate)
        net.train()


if __name__ == '__main__':
    main()
================================================
FILE: requirements.txt
================================================
cycler==0.10.0
funcsigs==1.0.2
matplotlib==2.1.0
mock==2.0.0
numpy==1.13.3
pandas==0.21.0
pbr==3.1.1
protobuf==3.5.0.post1
pyparsing==2.1.10
python-dateutil==2.6.0
pytz==2017.3
scipy==1.0.0
seaborn==0.8.1
six==1.11.0
tensorflow==1.4.0
================================================
FILE: src/__init__.py
================================================
================================================
FILE: src/config.py
================================================
# Filter shapes for the four stacked 1-D convolutions shared by all
# networks; each entry is [filter_width, in_channels, out_channels]
# (the shape expected by tf.nn.conv1d in src/layers.py).
FILTERS = [
    [4, 1, 2],
    [2, 2, 4],
    [1, 4, 4],
    [1, 4, 1]
]

# Number of bits in the plaintext message.
MSG_LEN = 16
# Number of bits in the shared Alice/Bob key
# (the model ties key length to MSG_LEN — see CryptoNet.__init__).
KEY_LEN = 16
# Minibatch size for each adversarial training step.
BATCH_SIZE = 512
# Number of adversarial training epochs.
NUM_EPOCHS = 60
# Learning rate used by the Adam optimizers of both sides.
LEARNING_RATE = 0.0008
================================================
FILE: src/layers.py
================================================
import tensorflow as tf
from config import FILTERS
def conv1d(input_, filter_shape, stride, name="conv1d"):
    """Apply one 1-D convolution with an Xavier-initialized kernel.

    Args:
        input_: 3-D input tensor for ``tf.nn.conv1d``.
        filter_shape: Kernel shape ``[width, in_channels, out_channels]``.
        stride: Convolution stride.
        name: Variable scope holding the kernel variable ``w``.

    Returns:
        The convolved tensor (``SAME`` padding).
    """
    with tf.variable_scope(name):
        initializer = tf.contrib.layers.xavier_initializer()
        kernel = tf.get_variable('w', shape=filter_shape,
                                 initializer=initializer)
        return tf.nn.conv1d(input_, kernel, stride, padding='SAME')
def conv_layer(hidden_layer_output, name):
    """Run the shared four-layer 1-D convolution stack.

    Three ReLU convolutions followed by a tanh output convolution,
    using the filter shapes from ``FILTERS`` and strides 1, 2, 1, 1.

    Args:
        hidden_layer_output: 3-D input tensor from the FC layer.
        name: Prefix for the per-layer variable scopes
            (``<name>_h0_conv`` ... ``<name>_h3_conv``).

    Returns:
        Output tensor of the final tanh convolution.
    """
    strides = (1, 2, 1, 1)
    activations = (tf.nn.relu, tf.nn.relu, tf.nn.relu, tf.nn.tanh)
    out = hidden_layer_output
    for idx, (shape, stride, act) in enumerate(zip(FILTERS, strides,
                                                   activations)):
        out = act(conv1d(out, shape, stride=stride,
                         name='%s_h%d_conv' % (name, idx)))
    return out
================================================
FILE: src/model.py
================================================
import tensorflow as tf
import numpy as np
import matplotlib
# OSX fix
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import seaborn as sns
from layers import conv_layer
from config import *
from utils import init_weights, gen_data
class CryptoNet(object):
    """Adversarial neural cryptography system (Abadi & Andersen, 2016).

    Alice encrypts a message with a shared key, Bob decrypts the
    ciphertext with the same key, and an adversary Eve tries to recover
    the message from the ciphertext alone.  Alice/Bob and Eve are
    trained adversarially, alternating each epoch.
    """

    # Minibatch iterations each side is trained per epoch
    # (hoisted out of the training loop; the value was redefined on
    # every epoch in the original code).
    ITERATIONS_PER_EPOCH = 2000

    def __init__(self, sess, msg_len=MSG_LEN, batch_size=BATCH_SIZE,
                 epochs=NUM_EPOCHS, learning_rate=LEARNING_RATE):
        """
        Args:
            sess: TensorFlow session
            msg_len: The length of the input message to encrypt.
                Alice and Bob's private key is tied to this length.
            batch_size: Minibatch size for each adversarial training
            epochs: Number of epochs in the adversarial training
            learning_rate: Learning Rate for Adam Optimizer
        """
        self.sess = sess
        self.msg_len = msg_len
        # Key length is tied to message length; both FC input widths
        # below are expressed in terms of N = msg_len.
        self.key_len = self.msg_len
        self.N = self.msg_len
        self.batch_size = batch_size
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.build_model()

    def build_model(self):
        """Build the Alice, Bob and Eve computation graphs."""
        # Weights for fully connected layers.  The 'alice_'/'bob_'/'eve_'
        # name prefixes are what train() later uses to split the
        # trainable variables between the two optimizers.
        self.w_alice = init_weights("alice_w", [2 * self.N, 2 * self.N])
        self.w_bob = init_weights("bob_w", [2 * self.N, 2 * self.N])
        self.w_eve1 = init_weights("eve_w1", [self.N, 2 * self.N])
        self.w_eve2 = init_weights("eve_w2", [2 * self.N, 2 * self.N])

        # Placeholder variables for Message and Key
        self.msg = tf.placeholder("float", [None, self.msg_len])
        self.key = tf.placeholder("float", [None, self.key_len])

        # Alice's network: (message, key) -> FC -> 4 1-D convolutions.
        self.alice_input = tf.concat([self.msg, self.key], 1)
        self.alice_hidden = tf.nn.sigmoid(tf.matmul(self.alice_input, self.w_alice))
        self.alice_hidden = tf.expand_dims(self.alice_hidden, 2)
        self.alice_output = tf.squeeze(conv_layer(self.alice_hidden, "alice"))

        # Bob's network: (ciphertext, key) -> FC -> 4 1-D convolutions.
        self.bob_input = tf.concat([self.alice_output, self.key], 1)
        self.bob_hidden = tf.nn.sigmoid(tf.matmul(self.bob_input, self.w_bob))
        self.bob_hidden = tf.expand_dims(self.bob_hidden, 2)
        self.bob_output = tf.squeeze(conv_layer(self.bob_hidden, "bob"))

        # Eve's network: ciphertext only (no key) -> FC -> FC -> conv stack.
        self.eve_input = self.alice_output
        self.eve_hidden1 = tf.nn.sigmoid(tf.matmul(self.eve_input, self.w_eve1))
        self.eve_hidden2 = tf.nn.sigmoid(tf.matmul(self.eve_hidden1, self.w_eve2))
        self.eve_hidden2 = tf.expand_dims(self.eve_hidden2, 2)
        self.eve_output = tf.squeeze(conv_layer(self.eve_hidden2, "eve"))

    def train(self):
        """Run the alternating adversarial training loop, then plot errors."""
        # Loss functions.  Eve minimizes her L1 reconstruction error.
        # Alice/Bob minimize Bob's reconstruction error while pushing
        # Eve's error toward 1.0 via the squared penalty term
        # (messages are +/-1 bits — see gen_data — so a mean absolute
        # error of 1.0 corresponds to random guessing).
        self.decrypt_err_eve = tf.reduce_mean(tf.abs(self.msg - self.eve_output))
        self.decrypt_err_bob = tf.reduce_mean(tf.abs(self.msg - self.bob_output))
        self.loss_bob = self.decrypt_err_bob + (1. - self.decrypt_err_eve) ** 2.

        # Get training variables corresponding to each network
        # (matched by the name prefixes chosen in build_model).
        self.t_vars = tf.trainable_variables()
        self.alice_or_bob_vars = [var for var in self.t_vars
                                  if 'alice_' in var.name or 'bob_' in var.name]
        self.eve_vars = [var for var in self.t_vars if 'eve_' in var.name]

        # Build the optimizers — each side only updates its own variables.
        self.bob_optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(
            self.loss_bob, var_list=self.alice_or_bob_vars)
        self.eve_optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(
            self.decrypt_err_eve, var_list=self.eve_vars)

        self.bob_errors, self.eve_errors = [], []

        # Begin Training
        tf.global_variables_initializer().run()
        for epoch in range(self.epochs):
            # print() calls instead of the original Python 2 print
            # statements, so the module also runs under Python 3.
            print('Training Alice and Bob, Epoch: {}'.format(epoch + 1))
            bob_loss, _ = self._train('bob', self.ITERATIONS_PER_EPOCH)
            self.bob_errors.append(bob_loss)

            print('Training Eve, Epoch: {}'.format(epoch + 1))
            _, eve_loss = self._train('eve', self.ITERATIONS_PER_EPOCH)
            self.eve_errors.append(eve_loss)

        self.plot_errors()

    def _train(self, network, iterations):
        """Train one side ('bob' or 'eve') for ``iterations`` minibatches.

        Args:
            network: 'bob' (trains Alice+Bob) or 'eve'.
            iterations: Number of minibatch optimization steps.

        Returns:
            (bob_decrypt_error, eve_decrypt_error): lowest decryption
            error observed for the trained side; the other entry keeps
            its initial value of 1.0.
        """
        bob_decrypt_error, eve_decrypt_error = 1., 1.
        bs = self.batch_size
        # Train Eve on double-size minibatches to give it a slight
        # computational edge over Alice and Bob.
        if network == 'eve':
            bs *= 2
        for _ in range(iterations):
            msg_in_val, key_val = gen_data(n=bs, msg_len=self.msg_len,
                                           key_len=self.key_len)
            if network == 'bob':
                _, decrypt_err = self.sess.run(
                    [self.bob_optimizer, self.decrypt_err_bob],
                    feed_dict={self.msg: msg_in_val, self.key: key_val})
                bob_decrypt_error = min(bob_decrypt_error, decrypt_err)
            elif network == 'eve':
                _, decrypt_err = self.sess.run(
                    [self.eve_optimizer, self.decrypt_err_eve],
                    feed_dict={self.msg: msg_in_val, self.key: key_val})
                eve_decrypt_error = min(eve_decrypt_error, decrypt_err)
        return bob_decrypt_error, eve_decrypt_error

    def plot_errors(self):
        """
        Plot Lowest Decryption Errors achieved by Bob and Eve per epoch
        """
        sns.set_style("darkgrid")
        plt.plot(self.bob_errors)
        plt.plot(self.eve_errors)
        plt.legend(['bob', 'eve'])
        plt.xlabel('Epoch')
        plt.ylabel('Lowest Decryption error achieved')
        plt.show()
================================================
FILE: src/utils.py
================================================
import numpy as np
import tensorflow as tf
from config import *
def gen_data(n=BATCH_SIZE, msg_len=MSG_LEN, key_len=KEY_LEN):
    """Generate n random messages and keys.

    Each entry is drawn uniformly from {-1, +1}.

    Returns:
        Tuple ``(messages, keys)`` of int arrays with shapes
        ``(n, msg_len)`` and ``(n, key_len)``.
    """
    messages = 2 * np.random.randint(0, 2, size=(n, msg_len)) - 1
    keys = 2 * np.random.randint(0, 2, size=(n, key_len)) - 1
    return messages, keys
def init_weights(name, shape):
    """Create a trainable variable with Xavier/Glorot initialization."""
    xavier = tf.contrib.layers.xavier_initializer()
    return tf.get_variable(name, shape=shape, initializer=xavier)
gitextract_iprf4wtj/
├── .gitignore
├── LICENSE.txt
├── README.md
├── main.py
├── requirements.txt
└── src/
├── __init__.py
├── config.py
├── layers.py
├── model.py
└── utils.py
SYMBOL INDEX (12 symbols across 4 files)
FILE: main.py
function build_parser (line 8) | def build_parser():
function main (line 31) | def main():
FILE: src/layers.py
function conv1d (line 5) | def conv1d(input_, filter_shape, stride, name="conv1d"):
function conv_layer (line 14) | def conv_layer(hidden_layer_output, name):
FILE: src/model.py
class CryptoNet (line 16) | class CryptoNet(object):
method __init__ (line 17) | def __init__(self, sess, msg_len=MSG_LEN, batch_size=BATCH_SIZE,
method build_model (line 39) | def build_model(self):
method train (line 72) | def train(self):
method _train (line 106) | def _train(self, network, iterations):
method plot_errors (line 129) | def plot_errors(self):
FILE: src/utils.py
function gen_data (line 7) | def gen_data(n=BATCH_SIZE, msg_len=MSG_LEN, key_len=KEY_LEN):
function init_weights (line 13) | def init_weights(name, shape):
Condensed preview — 10 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (12K chars).
[
{
"path": ".gitignore",
"chars": 73,
"preview": "# Virtual Env\nenv/\n\n# PyCharm files\n.idea/\n\n# Byte-compiled files\n*.pyc\n\n"
},
{
"path": "LICENSE.txt",
"chars": 1069,
"preview": "MIT License\n\nCopyright (c) 2016 Ankesh Anand\n\nPermission is hereby granted, free of charge, to any person obtaining a co"
},
{
"path": "README.md",
"chars": 1238,
"preview": "**Note**: This repository is no longer maintained. \r\n\r\n# Adversarial Neural Cryptography in [TensorFlow](https://github."
},
{
"path": "main.py",
"chars": 1337,
"preview": "import tensorflow as tf\n\nfrom argparse import ArgumentParser\nfrom src.model import CryptoNet\nfrom src.config import *\n\n\n"
},
{
"path": "requirements.txt",
"chars": 235,
"preview": "cycler==0.10.0\nfuncsigs==1.0.2\nmatplotlib==2.1.0\nmock==2.0.0\nnumpy==1.13.3\npandas==0.21.0\npbr==3.1.1\nprotobuf==3.5.0.pos"
},
{
"path": "src/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "src/config.py",
"chars": 156,
"preview": "FILTERS = [\n [4, 1, 2],\n [2, 2, 4],\n [1, 4, 4],\n [1, 4, 1]\n]\n\nMSG_LEN = 16\nKEY_LEN = 16\nBATCH_SIZE = 512\nNUM"
},
{
"path": "src/layers.py",
"chars": 742,
"preview": "import tensorflow as tf\nfrom config import FILTERS\n\n\ndef conv1d(input_, filter_shape, stride, name=\"conv1d\"):\n with t"
},
{
"path": "src/model.py",
"chars": 5593,
"preview": "import tensorflow as tf\nimport numpy as np\n\nimport matplotlib\n# OSX fix\nmatplotlib.use('TkAgg')\n\nimport matplotlib.pyplo"
},
{
"path": "src/utils.py",
"chars": 501,
"preview": "import numpy as np\nimport tensorflow as tf\nfrom config import *\n\n\n# Function to generate n random messages and keys\ndef "
}
]
About this extraction
This page contains the full source code of the ankeshanand/neural-cryptography-tensorflow GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 10 files (10.7 KB), approximately 2.9k tokens, and a symbol index with 12 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.