master be3cd0c08d2d cached
80 files
44.2 MB
89.1k tokens
66 symbols
1 requests
Download .txt
Showing preview only (266K chars total). Download the full file or copy to clipboard to get everything.
Repository: PacktPublishing/Deep-Learning-with-TensorFlow
Branch: master
Commit: be3cd0c08d2d
Files: 80
Total size: 44.2 MB

Directory structure:
gitextract_0dylqyzr/

├── Chapter02/
│   ├── Python 2.7/
│   │   ├── computation_model.py
│   │   ├── data_model.py
│   │   ├── feeding_parameters.py
│   │   ├── fetching_parameters_1.py
│   │   ├── programming_model.py
│   │   ├── single_neuron_model_1.py
│   │   ├── tensor_flow_counter_1.py
│   │   └── tensor_with_numpy_1.py
│   └── Python 3.5/
│       ├── computation_model.py
│       ├── data_model.py
│       ├── feeding_parameters.py
│       ├── fetching_parameters_1.py
│       ├── programming_model.py
│       ├── single_neuron_model_1.py
│       ├── tensor_flow_counter_1.py
│       └── tensor_with_numpy_1.py
├── Chapter03/
│   ├── Python 2.7/
│   │   ├── five_layers_relu_1.py
│   │   ├── five_layers_relu_dropout_1.py
│   │   ├── five_layers_sigmoid_1.py
│   │   ├── softmax_classifier_1.py
│   │   ├── softmax_model_loader_1.py
│   │   └── softmax_model_saver_1.py
│   └── Python 3.5/
│       ├── five_layers_relu_1.py
│       ├── five_layers_relu_dropout_1.py
│       ├── five_layers_sigmoid_1.py
│       ├── softmax_classifier_1.py
│       ├── softmax_model_loader_1.py
│       └── softmax_model_saver_1.py
├── Chapter04/
│   ├── EMOTION_CNN/
│   │   ├── EmotionDetector/
│   │   │   ├── test.csv
│   │   │   └── train.csv
│   │   ├── Python 2.7/
│   │   │   ├── EmotionDetectorUtils.py
│   │   │   ├── EmotionDetector_1.py
│   │   │   └── test_your_image.py
│   │   └── Python 3.5/
│   │       ├── EmotionDetectorUtils.py
│   │       ├── EmotionDetector_1.py
│   │       ├── __init__.py
│   │       └── test_your_image.py
│   └── MNIST_CNN/
│       ├── Python 2.7/
│       │   └── mnist_cnn_1.py
│       └── Python 3.5/
│           └── mnist_cnn_1.py
├── Chapter05/
│   ├── Python 2.7/
│   │   ├── Convlutional_AutoEncoder.py
│   │   ├── autoencoder_1.py
│   │   ├── deconvolutional_autoencoder_1.py
│   │   └── denoising_autoencoder_1.py
│   └── Python 3.5/
│       ├── Convlutional_AutoEncoder.py
│       ├── __init__.py
│       ├── autoencoder_1.py
│       ├── deconvolutional_autoencoder_1.py
│       └── denoising_autoencoder_1.py
├── Chapter06/
│   ├── Python 2.7/
│   │   ├── LSTM_model_1.py
│   │   ├── __init__.py
│   │   └── bidirectional_RNN_1.py
│   └── Python 3.5/
│       ├── LSTM_model_1.py
│       ├── __init__.py
│       └── bidirectional_RNN_1.py
├── Chapter07/
│   ├── Python 2.7/
│   │   ├── gpu_computing_with_multiple_GPU.py
│   │   ├── gpu_example.py
│   │   └── gpu_soft_placemnet_1.py
│   └── Python 3.5/
│       ├── gpu_computing_with_multiple_GPU.py
│       ├── gpu_example.py
│       └── gpu_soft_placemnet_1.py
├── Chapter08/
│   ├── Python 2.7/
│   │   ├── digit_classifier.py
│   │   ├── keras_movie_classifier_1.py
│   │   ├── keras_movie_classifier_using_convLayer_1.py
│   │   ├── pretty_tensor_digit_1.py
│   │   └── tflearn_titanic_classifier.py
│   ├── Python 3.5/
│   │   ├── __init__.py
│   │   ├── digit_classifier.py
│   │   ├── keras_movie_classifier_1.py
│   │   ├── keras_movie_classifier_using_convLayer_1.py
│   │   ├── pretty_tensor_digit_1.py
│   │   └── tflearn_titanic_classifier.py
│   └── data/
│       └── titanic_dataset.csv
├── Chapter09/
│   ├── Python 2.7/
│   │   └── classify_image.py
│   └── Python 3.5/
│       └── classify_image.py
├── Chapter10/
│   ├── Python 2.7/
│   │   ├── FrozenLake_1.py
│   │   └── Q_Learning_1.py
│   └── Python 3.5/
│       ├── FrozenLake_1.py
│       └── Q_Learning_1.py
├── LICENSE
└── README.md

================================================
FILE CONTENTS
================================================

================================================
FILE: Chapter02/Python 2.7/computation_model.py
================================================
import tensorflow as tf

# Tiny computation graph: y = x * 2, evaluated for x = [100].
# BUG FIX: the original called session.run() *after* the `with` block had
# exited, i.e. on an already-closed Session, which raises
# "Attempted to use a closed Session". All execution now happens while
# the session is still open.
with tf.Session() as session:
    x = tf.placeholder(tf.float32, [1], name="x")  # graph input
    y = tf.placeholder(tf.float32, [1], name="y")  # immediately rebound below (kept for parity with the book)
    z = tf.constant(2.0)
    y = x * z  # rebinds y to the multiplication op

    x_in = [100]
    y_output = session.run(y, {x: x_in})  # feeds x, fetches y -> [200.]
    print(y_output)


================================================
FILE: Chapter02/Python 2.7/data_model.py
================================================
import tensorflow as tf

# Constant tensors of increasing rank: scalar (rank 0) up to a 3-D "cube".
scalar = tf.constant(100)
vector = tf.constant([1, 2, 3, 4, 5])
matrix = tf.constant([[1, 2, 3], [4, 5, 6]])
cube_matrix = tf.constant([[[1], [2], [3]],
                           [[4], [5], [6]],
                           [[7], [8], [9]]])

# Static shapes are known at graph-construction time; no Session needed.
for tensor in (scalar, vector, matrix, cube_matrix):
    print(tensor.get_shape())

"""
>>> 
()
(5,)
(2, 3)
(3, 3, 1)
>>> 
"""



================================================
FILE: Chapter02/Python 2.7/feeding_parameters.py
================================================
import tensorflow as tf
import numpy as np

# Feed a random (a, b) matrix into a placeholder and compute x + x.
a = 3  # rows
b = 2  # columns

x = tf.placeholder(tf.float32, shape=(a, b))
y = tf.add(x, x)

data = np.random.rand(a, b)

# BUG FIX: the original created a Session and never closed it; the
# context manager guarantees the session is released.
# (print(...) with a single argument behaves identically under Python 2.)
with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: data}))



================================================
FILE: Chapter02/Python 2.7/fetching_parameters_1.py
================================================
import tensorflow as tf

# Fetch several tensors with a single run() call.
constant_A = tf.constant([100.0])
constant_B = tf.constant([300.0])
constant_C = tf.constant([3.0])

sum_ = tf.add(constant_A, constant_B)       # 100 + 300 -> [400.]
mul_ = tf.multiply(constant_A, constant_C)  # 100 * 3   -> [300.]

with tf.Session() as sess:
    # Passing a list of fetches returns a list of results, one per tensor.
    result = sess.run([sum_, mul_])
    print(result)


"""
>>> 
[array([ 400.], dtype=float32), array([ 300.], dtype=float32)]
>>> 
"""


================================================
FILE: Chapter02/Python 2.7/programming_model.py
================================================
import tensorflow as tf

# Programming-model demo: y = x * 2 for x = [100].
# BUG FIX: the original ran session.run() outside the `with` block, on a
# session that had already been closed — a guaranteed runtime error.
with tf.Session() as session:
    x = tf.placeholder(tf.float32, [1], name="x")
    y = tf.placeholder(tf.float32, [1], name="y")  # rebound below; kept for parity with the book
    z = tf.constant(2.0)
    y = x * z

    x_in = [100]
    y_output = session.run(y, {x: x_in})  # -> [200.]
    print(y_output)


================================================
FILE: Chapter02/Python 2.7/single_neuron_model_1.py
================================================
import tensorflow as tf

# A single neuron trained by gradient descent: drives
# model = input_value * weight towards expected_output (0.0),
# logging every scalar to TensorBoard.
weight = tf.Variable(1.0, name="weight")
input_value = tf.constant(0.5, name="input_value")
expected_output = tf.constant(0.0, name="expected_output")
model = tf.multiply(input_value, weight, "model")
# Squared error between target and prediction.
loss_function = tf.pow(expected_output - model, 2, name="loss_function")

optimizer = tf.train.GradientDescentOptimizer(0.025).minimize(loss_function)

for value in [input_value, weight, expected_output, model, loss_function]:
    tf.summary.scalar(value.op.name, value)

summaries = tf.summary.merge_all()

# BUG FIX: the original never closed the Session or the FileWriter, which
# can leave the last summary events unflushed on disk.
with tf.Session() as sess:
    summary_writer = tf.summary.FileWriter('log_simple_stats', sess.graph)
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        summary_writer.add_summary(sess.run(summaries), i)
        sess.run(optimizer)  # one gradient-descent step
    summary_writer.close()


================================================
FILE: Chapter02/Python 2.7/tensor_flow_counter_1.py
================================================
import tensorflow as tf

# A counter Variable incremented ten times through an assign op.
value = tf.Variable(0, name="value")
one = tf.constant(1)
new_value = tf.add(value, one)
update_value = tf.assign(value, new_value)  # value <- value + 1

initialize_var = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(initialize_var)
    print(sess.run(value))  # initial value: 0
    for _ in range(10):
        sess.run(update_value)
        print(sess.run(value))

"""
>>> 
0
1
2
3
4
5
6
7
8
9
10
>>>
"""     


================================================
FILE: Chapter02/Python 2.7/tensor_with_numpy_1.py
================================================
import tensorflow as tf
import numpy as np

# 1-D tensor with constant values
array_1d = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
tensor_1d = tf.constant(array_1d)
with tf.Session() as sess:
    print(tensor_1d.get_shape())
    print(sess.run(tensor_1d))

# 2-D tensor with variable values
array_2d = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
tensor_2d = tf.Variable(array_2d)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(tensor_2d.get_shape())
    print(sess.run(tensor_2d))

# 3-D tensor converted from a NumPy array and cast to float64.
array_3d = np.array([[[ 0,  1,  2], [ 3,  4,  5], [ 6,  7,  8]],
                     [[ 9, 10, 11], [12, 13, 14], [15, 16, 17]],
                     [[18, 19, 20], [21, 22, 23], [24, 25, 26]]])

tensor_3d = tf.convert_to_tensor(array_3d, dtype=tf.float64)
with tf.Session() as sess:
    print(tensor_3d.get_shape())
    print(sess.run(tensor_3d))

# InteractiveSession installs itself as default, so eval() needs no
# explicit session argument.
interactive_session = tf.InteractiveSession()
tensor = tf.constant(np.array([1, 2, 3, 4, 5]))
print(tensor.eval())
interactive_session.close()

"""
Python 2.7.10 (default, Oct 14 2015, 16:09:02) 
[GCC 5.2.1 20151010] on linux2
Type "copyright", "credits" or "license()" for more information.
>>> ================================ RESTART ================================
>>> 
(10,)
[ 1  2  3  4  5  6  7  8  9 10]
(3, 3)
[[1 2 3]
 [4 5 6]
 [7 8 9]]
(3, 3, 3)
[[[  0.   1.   2.]
  [  3.   4.   5.]
  [  6.   7.   8.]]

 [[  9.  10.  11.]
  [ 12.  13.  14.]
  [ 15.  16.  17.]]

 [[ 18.  19.  20.]
  [ 21.  22.  23.]
  [ 24.  25.  26.]]]
[1 2 3 4 5]
>>> 
"""




================================================
FILE: Chapter02/Python 3.5/computation_model.py
================================================
import tensorflow as tf

# Tiny computation graph: y = x * 2, evaluated for x = [100].
# BUG FIX: the original called session.run() after the `with` block had
# closed the session, raising "Attempted to use a closed Session".
with tf.Session() as session:
    x = tf.placeholder(tf.float32, [1], name="x")
    y = tf.placeholder(tf.float32, [1], name="y")  # rebound below; kept for parity with the book
    z = tf.constant(2.0)
    y = x * z

    x_in = [100]
    y_output = session.run(y, {x: x_in})  # -> [200.]
    print(y_output)


================================================
FILE: Chapter02/Python 3.5/data_model.py
================================================
import tensorflow as tf

# Constant tensors of increasing rank: scalar (rank 0) up to a 3-D "cube".
scalar = tf.constant(100)
vector = tf.constant([1, 2, 3, 4, 5])
matrix = tf.constant([[1, 2, 3], [4, 5, 6]])
cube_matrix = tf.constant([[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]])

# Static shapes are available without running a Session.
for tensor in (scalar, vector, matrix, cube_matrix):
    print(tensor.get_shape())

"""
>>> 
()
(5,)
(2, 3)
(3, 3, 1)
>>> 
"""



================================================
FILE: Chapter02/Python 3.5/feeding_parameters.py
================================================
import tensorflow as tf
import numpy as np

# Feed a random (a, b) matrix into a placeholder and compute x + x.
a = 3  # rows
b = 2  # columns

x = tf.placeholder(tf.float32, shape=(a, b))
y = tf.add(x, x)

data = np.random.rand(a, b)

# BUG FIX: the original Session was never closed; the context manager
# guarantees it is released after the run.
with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: data}))



================================================
FILE: Chapter02/Python 3.5/fetching_parameters_1.py
================================================
import tensorflow as tf

# Fetch two results from a single run() call.
constant_A = tf.constant([100.0])
constant_B = tf.constant([300.0])
constant_C = tf.constant([3.0])

sum_ = tf.add(constant_A, constant_B)       # -> [400.]
mul_ = tf.multiply(constant_A, constant_C)  # -> [300.]

with tf.Session() as sess:
    # A list of fetches yields a list of results in the same order.
    result = sess.run([sum_, mul_])
    print(result)


"""
>>> 
[array([ 400.], dtype=float32), array([ 300.], dtype=float32)]
>>> 
"""


================================================
FILE: Chapter02/Python 3.5/programming_model.py
================================================
import tensorflow as tf

# Programming-model demo: y = x * 2 for x = [100].
# BUG FIX: the original ran session.run() outside the `with` block, on an
# already-closed session — a guaranteed runtime error.
with tf.Session() as session:
    x = tf.placeholder(tf.float32, [1], name="x")
    y = tf.placeholder(tf.float32, [1], name="y")  # rebound below; kept for parity with the book
    z = tf.constant(2.0)
    y = x * z

    x_in = [100]
    y_output = session.run(y, {x: x_in})  # -> [200.]
    print(y_output)


================================================
FILE: Chapter02/Python 3.5/single_neuron_model_1.py
================================================
import tensorflow as tf

# A single neuron trained by gradient descent: drives
# model = input_value * weight towards expected_output (0.0),
# logging every scalar to TensorBoard.
weight = tf.Variable(1.0, name="weight")
input_value = tf.constant(0.5, name="input_value")
expected_output = tf.constant(0.0, name="expected_output")
model = tf.multiply(input_value, weight, "model")
# Squared error between target and prediction.
loss_function = tf.pow(expected_output - model, 2, name="loss_function")

optimizer = tf.train.GradientDescentOptimizer(0.025).minimize(loss_function)

for value in [input_value, weight, expected_output, model, loss_function]:
    tf.summary.scalar(value.op.name, value)

summaries = tf.summary.merge_all()

# BUG FIX: the original never closed the Session or the FileWriter, which
# can leave the final summary events unflushed on disk.
with tf.Session() as sess:
    summary_writer = tf.summary.FileWriter('log_simple_stats', sess.graph)
    sess.run(tf.global_variables_initializer())

    for i in range(100):
        summary_writer.add_summary(sess.run(summaries), i)
        sess.run(optimizer)  # one gradient-descent step
    summary_writer.close()


================================================
FILE: Chapter02/Python 3.5/tensor_flow_counter_1.py
================================================
import tensorflow as tf

# A counter Variable incremented ten times through an assign op.
value = tf.Variable(0, name="value")
one = tf.constant(1)
new_value = tf.add(value, one)
update_value = tf.assign(value, new_value)  # value <- value + 1

initialize_var = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(initialize_var)
    print(sess.run(value))  # initial value: 0
    for _ in range(10):
        sess.run(update_value)
        print(sess.run(value))

"""
>>> 
0
1
2
3
4
5
6
7
8
9
10
>>>
"""     


================================================
FILE: Chapter02/Python 3.5/tensor_with_numpy_1.py
================================================
import tensorflow as tf
import numpy as np

# 1-D tensor with constant values
array_1d = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
tensor_1d = tf.constant(array_1d)
with tf.Session() as sess:
    print(tensor_1d.get_shape())
    print(sess.run(tensor_1d))

# 2-D tensor with variable values
array_2d = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
tensor_2d = tf.Variable(array_2d)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(tensor_2d.get_shape())
    print(sess.run(tensor_2d))

# 3-D tensor converted from a NumPy array and cast to float64.
array_3d = np.array([[[ 0,  1,  2], [ 3,  4,  5], [ 6,  7,  8]],
                     [[ 9, 10, 11], [12, 13, 14], [15, 16, 17]],
                     [[18, 19, 20], [21, 22, 23], [24, 25, 26]]])

tensor_3d = tf.convert_to_tensor(array_3d, dtype=tf.float64)
with tf.Session() as sess:
    print(tensor_3d.get_shape())
    print(sess.run(tensor_3d))

# InteractiveSession installs itself as default, so eval() needs no
# explicit session argument.
interactive_session = tf.InteractiveSession()
tensor = tf.constant(np.array([1, 2, 3, 4, 5]))
print(tensor.eval())
interactive_session.close()

"""
Python 2.7.10 (default, Oct 14 2015, 16:09:02) 
[GCC 5.2.1 20151010] on linux2
Type "copyright", "credits" or "license()" for more information.
>>> ================================ RESTART ================================
>>> 
(10,)
[ 1  2  3  4  5  6  7  8  9 10]
(3, 3)
[[1 2 3]
 [4 5 6]
 [7 8 9]]
(3, 3, 3)
[[[  0.   1.   2.]
  [  3.   4.   5.]
  [  6.   7.   8.]]

 [[  9.  10.  11.]
  [ 12.  13.  14.]
  [ 15.  16.  17.]]

 [[ 18.  19.  20.]
  [ 21.  22.  23.]
  [ 24.  25.  26.]]]
[1 2 3 4 5]
>>> 
"""




================================================
FILE: Chapter03/Python 2.7/five_layers_relu_1.py
================================================
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import math

# Five-layer fully-connected ReLU network on MNIST with a softmax output,
# trained with Adam under an exponentially decaying learning rate and
# logged for TensorBoard.
logs_path = 'log_simple_stats_5_layers_relu_softmax'
batch_size = 100
learning_rate = 0.5
training_epochs = 10

mnist = input_data.read_data_sets("/tmp/data", one_hot=True)

X = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 images
Y_ = tf.placeholder(tf.float32, [None, 10])   # one-hot labels
lr = tf.placeholder(tf.float32)               # learning rate, fed per step


# Hidden-layer widths.
L = 200
M = 100
N = 60
O = 30

# Small positive biases (0.1) keep ReLU units initially active.
W1 = tf.Variable(tf.truncated_normal([784, L], stddev=0.1))  
B1 = tf.Variable(tf.ones([L])/10)
W2 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
B2 = tf.Variable(tf.ones([M])/10)
W3 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
B3 = tf.Variable(tf.ones([N])/10)
W4 = tf.Variable(tf.truncated_normal([N, O], stddev=0.1))
B4 = tf.Variable(tf.ones([O])/10)
W5 = tf.Variable(tf.truncated_normal([O, 10], stddev=0.1))
B5 = tf.Variable(tf.zeros([10]))


XX = tf.reshape(X, [-1, 784])
Y1 = tf.nn.relu(tf.matmul(XX, W1) + B1)
Y2 = tf.nn.relu(tf.matmul(Y1, W2) + B2)
Y3 = tf.nn.relu(tf.matmul(Y2, W3) + B3)
Y4 = tf.nn.relu(tf.matmul(Y3, W4) + B4)
Ylogits = tf.matmul(Y4, W5) + B5
Y = tf.nn.softmax(Ylogits)

# Loss on the raw logits (softmax is applied inside the op); x100 scaling
# as in the book so the TensorBoard curves are readable.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100


correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()

# BUG FIX: the original created an extra tf.Session() here and then
# shadowed it with the `with` block, leaking an open session.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logs_path, \
                                    graph=tf.get_default_graph())
    for epoch in range(training_epochs):
        batch_count = int(mnist.train.num_examples/batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            max_learning_rate = 0.003
            min_learning_rate = 0.0001
            # BUG FIX: decay_speed must be a float — under Python 2,
            # -i/2000 is *integer* division, so exp(-i/decay_speed)
            # collapsed to exp(0) or exp(-1) instead of a smooth decay.
            decay_speed = 2000.0
            learning_rate = min_learning_rate+\
                            (max_learning_rate - min_learning_rate)\
                            * math.exp(-i/decay_speed)
            _, summary = sess.run([train_step, summary_op],\
                                  {X: batch_x, Y_: batch_y,\
                                   lr: learning_rate})
            writer.add_summary(summary,\
                               epoch * batch_count + i)
        print("Epoch: ", epoch)
           
    print("Accuracy: ", accuracy.eval(feed_dict={X: mnist.test.images, Y_: mnist.test.labels}))
    print("done")
    writer.close()  # flush remaining events to disk



================================================
FILE: Chapter03/Python 2.7/five_layers_relu_dropout_1.py
================================================
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import math

logs_path = 'log_simple_stats_5_lyers_dropout'
batch_size = 100
learning_rate = 0.5
training_epochs = 10

mnist = input_data.read_data_sets("/tmp/data", one_hot=True)

X = tf.placeholder(tf.float32, [None, 784])
Y_ = tf.placeholder(tf.float32, [None, 10])
lr = tf.placeholder(tf.float32)
pkeep = tf.placeholder(tf.float32)

L = 200
M = 100
N = 60
O = 30

W1 = tf.Variable(tf.truncated_normal([784, L], stddev=0.1))  
B1 = tf.Variable(tf.ones([L])/10)
W2 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
B2 = tf.Variable(tf.ones([M])/10)
W3 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
B3 = tf.Variable(tf.ones([N])/10)
W4 = tf.Variable(tf.truncated_normal([N, O], stddev=0.1))
B4 = tf.Variable(tf.ones([O])/10)
W5 = tf.Variable(tf.truncated_normal([O, 10], stddev=0.1))
B5 = tf.Variable(tf.zeros([10]))

XX = tf.reshape(X, [-1, 28*28])

Y1 = tf.nn.relu(tf.matmul(XX, W1) + B1)
Y1d = tf.nn.dropout(Y1, pkeep)

Y2 = tf.nn.relu(tf.matmul(Y1d, W2) + B2)
Y2d = tf.nn.dropout(Y2, pkeep)

Y3 = tf.nn.relu(tf.matmul(Y2d, W3) + B3)
Y3d = tf.nn.dropout(Y3, pkeep)

Y4 = tf.nn.relu(tf.matmul(Y3d, W4) + B4)
Y4d = tf.nn.dropout(Y4, pkeep)

Ylogits = tf.matmul(Y4d, W5) + B5
Y = tf.nn.softmax(Ylogits)

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100

correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)


with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logs_path, \
                                    graph=tf.get_default_graph())
    for epoch in range(training_epochs):
        batch_count = int(mnist.train.num_examples/batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            max_learning_rate = 0.003
            min_learning_rate = 0.0001
            decay_speed = 2000 
            learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i/decay_speed)
            _, summary = sess.run([train_step, summary_op], {X: batch_x, Y_: batch_y, pkeep: 0.75, lr: learning_rate})
            writer.add_summary(summary,\
                               epoch * batch_count + i)
        print "Epoch: ", epoch
           
    print "Accuracy: ", accuracy.eval\
          (feed_dict={X: mnist.test.images, Y_: mnist.test.labels, pkeep: 0.75})
    print "done"



================================================
FILE: Chapter03/Python 2.7/five_layers_sigmoid_1.py
================================================
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import math

# Five-layer fully-connected sigmoid network on MNIST with a softmax
# output, trained with Adam at a fixed learning rate (0.003).
logs_path = 'log_simple_stats_5_layers_sigmoid'
batch_size = 100
learning_rate = 0.5
training_epochs = 10

mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
X = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 images
Y_ = tf.placeholder(tf.float32, [None, 10])   # one-hot labels

# Hidden-layer widths.
L = 200
M = 100
N = 60
O = 30

W1 = tf.Variable(tf.truncated_normal([784, L], stddev=0.1))  
B1 = tf.Variable(tf.zeros([L]))
W2 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
B2 = tf.Variable(tf.zeros([M]))
W3 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
B3 = tf.Variable(tf.zeros([N]))
W4 = tf.Variable(tf.truncated_normal([N, O], stddev=0.1))
B4 = tf.Variable(tf.zeros([O]))
W5 = tf.Variable(tf.truncated_normal([O, 10], stddev=0.1))
B5 = tf.Variable(tf.zeros([10]))


XX = tf.reshape(X, [-1, 784])
Y1 = tf.nn.sigmoid(tf.matmul(XX, W1) + B1)
Y2 = tf.nn.sigmoid(tf.matmul(Y1, W2) + B2)
Y3 = tf.nn.sigmoid(tf.matmul(Y2, W3) + B3)
Y4 = tf.nn.sigmoid(tf.matmul(Y3, W4) + B4)
Ylogits = tf.matmul(Y4, W5) + B5
Y = tf.nn.softmax(Ylogits)


# Loss on the raw logits; x100 scaling as in the book.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
learning_rate = 0.003  # fixed Adam learning rate (overrides the 0.5 above)
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()


# BUG FIX: the original created an extra tf.Session() here and then
# shadowed it with the `with` block below, leaking an open session.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
    for epoch in range(training_epochs):
        batch_count = int(mnist.train.num_examples/batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            _, summary = sess.run([train_step, summary_op],\
                                  feed_dict={X: batch_x,\
                                             Y_: batch_y})
            writer.add_summary(summary,\
                               epoch * batch_count + i)
        print("Epoch: ", epoch)
           
    print("Accuracy: ", accuracy.eval(feed_dict={X: mnist.test.images, Y_: mnist.test.labels}))
    print("done")
    writer.close()  # flush remaining events to disk



================================================
FILE: Chapter03/Python 2.7/softmax_classifier_1.py
================================================
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
from random import randint
import numpy as np

# Single-layer softmax classifier on MNIST: trains, reports test
# accuracy, classifies one random test image, and saves a checkpoint.
logs_path = 'log_mnist_softmax'
batch_size = 100
learning_rate = 0.5
training_epochs = 10
mnist = input_data.read_data_sets("data", one_hot=True)

X = tf.placeholder(tf.float32, [None, 784], name="input")
Y_ = tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
XX = tf.reshape(X, [-1, 784])


# BUG FIX: softmax_cross_entropy_with_logits expects *unscaled* logits;
# the original passed the already-softmaxed Y as `logits`, applying
# softmax twice and distorting the gradients.
Ylogits = tf.matmul(XX, W) + b
Y = tf.nn.softmax(Ylogits, name="output")
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=Ylogits))
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

train_step = tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy)

tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logs_path, \
                                    graph=tf.get_default_graph())
    for epoch in range(training_epochs):
        batch_count = int(mnist.train.num_examples/batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            _, summary = sess.run([train_step, summary_op],\
                                  feed_dict={X: batch_x,\
                                             Y_: batch_y})
            writer.add_summary(summary, epoch * batch_count + i)
        print("Epoch: ", epoch)
           
    print("Accuracy: ", accuracy.eval(feed_dict={X: mnist.test.images, Y_: mnist.test.labels}))
    print("done")
    
    # BUG FIX: randint is inclusive at both ends, so the upper bound must
    # be shape[0] - 1 to avoid a rare IndexError.
    num = randint(0, mnist.test.images.shape[0] - 1)
    img = mnist.test.images[num] 
 
    classification = sess.run(tf.argmax(Y, 1), feed_dict={X: [img]}) 
    print('Neural Network predicted', classification[0])
    print('Real label is:', np.argmax(mnist.test.labels[num]))

    saver = tf.train.Saver()
    save_path = saver.save(sess, "data/saved_mnist_cnn.ckpt")
    print("Model saved to %s" % save_path)




================================================
FILE: Chapter03/Python 2.7/softmax_model_loader_1.py
================================================
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from random import randint
from tensorflow.examples.tutorials.mnist import input_data

# Restore the checkpoint produced by softmax_model_saver_1.py and
# classify one randomly chosen MNIST test image.
mnist = input_data.read_data_sets('data', one_hot=True)
sess = tf.InteractiveSession()
new_saver = tf.train.import_meta_graph('data/saved_mnist_cnn.ckpt.meta')
new_saver.restore(sess, 'data/saved_mnist_cnn.ckpt')
tf.get_default_graph().as_graph_def()

x = sess.graph.get_tensor_by_name("input:0")       # [None, 784] input placeholder
y_conv = sess.graph.get_tensor_by_name("output:0") # softmax predictions

# BUG FIX: randint is inclusive at both ends — the original could index
# one past the end of the test set.
num = randint(0, mnist.test.images.shape[0] - 1)
img = mnist.test.images[num]

# BUG FIX: the placeholder expects a batch, so the single image must be
# wrapped as [img]; the original also fetched the input tensor alongside
# the output and ran argmax over that mixed list, which is meaningless.
result = sess.run(y_conv, feed_dict={x: [img]})
print(result)
print(sess.run(tf.argmax(result, 1)))

# BUG FIX: `image_b` was never defined (NameError); show the image that
# was actually classified.
plt.imshow(img.reshape([28, 28]), cmap='Greys')
plt.show()







================================================
FILE: Chapter03/Python 2.7/softmax_model_saver_1.py
================================================
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
from random import randint
import numpy as np

# Single-layer softmax classifier on MNIST; trains, evaluates, classifies
# one random test image, and saves the model for softmax_model_loader_1.py.
logs_path = 'log_mnist_softmax'
batch_size = 100
learning_rate = 0.5
training_epochs = 10
mnist = input_data.read_data_sets("data", one_hot=True)

X = tf.placeholder(tf.float32, [None, 784], name="input")
Y_ = tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
XX = tf.reshape(X, [-1, 784])

# BUG FIX (x2): the original multiplied X instead of the reshaped XX,
# leaving XX dead code, and passed the softmaxed Y as `logits` to
# softmax_cross_entropy_with_logits — a double softmax that distorts
# the gradients. The loss now uses the raw logits.
Ylogits = tf.matmul(XX, W) + b
Y = tf.nn.softmax(Ylogits, name="output")
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=Ylogits))
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

train_step = tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy)

tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logs_path, \
                                   graph=tf.get_default_graph())
    for epoch in range(training_epochs):
        batch_count = int(mnist.train.num_examples / batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            _, summary = sess.run([train_step, summary_op], \
                                  feed_dict={X: batch_x, \
                                             Y_: batch_y})
            writer.add_summary(summary, epoch * batch_count + i)
        print("Epoch: ", epoch)

    print("Accuracy: ", accuracy.eval(feed_dict={X: mnist.test.images, Y_: mnist.test.labels}))
    print("done")

    # BUG FIX: randint is inclusive at both ends; shape[0] - 1 avoids a
    # rare IndexError on the last+1 index.
    num = randint(0, mnist.test.images.shape[0] - 1)
    img = mnist.test.images[num]

    classification = sess.run(tf.argmax(Y, 1), feed_dict={X: [img]})
    print('Neural Network predicted', classification[0])
    print('Real label is:', np.argmax(mnist.test.labels[num]))

    saver = tf.train.Saver()
    save_path = saver.save(sess, "data/saved_mnist_cnn.ckpt")
    print("Model saved to %s" % save_path)
    writer.close()  # flush remaining events to disk




================================================
FILE: Chapter03/Python 3.5/five_layers_relu_1.py
================================================
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import math

# Five-layer fully-connected ReLU network on MNIST with a softmax output,
# trained with Adam under an exponentially decaying learning rate and
# logged for TensorBoard.
logs_path = 'log_simple_stats_5_layers_relu_softmax'
batch_size = 100
learning_rate = 0.5
training_epochs = 10

mnist = input_data.read_data_sets("/tmp/data", one_hot=True)

X = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 images
Y_ = tf.placeholder(tf.float32, [None, 10])   # one-hot labels
lr = tf.placeholder(tf.float32)               # learning rate, fed per step


# Hidden-layer widths.
L = 200
M = 100
N = 60
O = 30

# Small positive biases (0.1) keep ReLU units initially active.
W1 = tf.Variable(tf.truncated_normal([784, L], stddev=0.1))  
B1 = tf.Variable(tf.ones([L])/10)
W2 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
B2 = tf.Variable(tf.ones([M])/10)
W3 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
B3 = tf.Variable(tf.ones([N])/10)
W4 = tf.Variable(tf.truncated_normal([N, O], stddev=0.1))
B4 = tf.Variable(tf.ones([O])/10)
W5 = tf.Variable(tf.truncated_normal([O, 10], stddev=0.1))
B5 = tf.Variable(tf.zeros([10]))


XX = tf.reshape(X, [-1, 784])
Y1 = tf.nn.relu(tf.matmul(XX, W1) + B1)
Y2 = tf.nn.relu(tf.matmul(Y1, W2) + B2)
Y3 = tf.nn.relu(tf.matmul(Y2, W3) + B3)
Y4 = tf.nn.relu(tf.matmul(Y3, W4) + B4)
Ylogits = tf.matmul(Y4, W5) + B5
Y = tf.nn.softmax(Ylogits)

# Loss on the raw logits; x100 scaling as in the book.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100


correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()

# BUG FIX: the original created an extra tf.Session() here and then
# shadowed it with the `with` block below, leaking an open session.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logs_path, \
                                    graph=tf.get_default_graph())
    for epoch in range(training_epochs):
        batch_count = int(mnist.train.num_examples/batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            max_learning_rate = 0.003
            min_learning_rate = 0.0001
            decay_speed = 2000.0  # float for safety if run under Python 2
            learning_rate = min_learning_rate+\
                            (max_learning_rate - min_learning_rate)\
                            * math.exp(-i/decay_speed)
            _, summary = sess.run([train_step, summary_op],\
                                  {X: batch_x, Y_: batch_y,\
                                   lr: learning_rate})
            writer.add_summary(summary,\
                               epoch * batch_count + i)
        print("Epoch: ", epoch)
           
    print("Accuracy: ", accuracy.eval(feed_dict={X: mnist.test.images, Y_: mnist.test.labels}))
    print("done")
    writer.close()  # flush remaining events to disk



================================================
FILE: Chapter03/Python 3.5/five_layers_relu_dropout_1.py
================================================
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import math

# Hyper-parameters.  NOTE(review): `learning_rate` here is never used as-is;
# the effective rate is recomputed per-step in the loop and fed via `lr`.
logs_path = 'log_simple_stats_5_lyers_dropout'
batch_size = 100
learning_rate = 0.5
training_epochs = 10

mnist = input_data.read_data_sets("/tmp/data", one_hot=True)

# Placeholders: flattened 28x28 images, one-hot labels, per-step learning
# rate, and dropout keep-probability (fed each run).
X = tf.placeholder(tf.float32, [None, 784])
Y_ = tf.placeholder(tf.float32, [None, 10])
lr = tf.placeholder(tf.float32)
pkeep = tf.placeholder(tf.float32)

# Hidden-layer widths, shrinking toward the 10-class output.
L = 200
M = 100
N = 60
O = 30

# ReLU hidden layers get small positive biases (ones/10); output gets zeros.
W1 = tf.Variable(tf.truncated_normal([784, L], stddev=0.1))
B1 = tf.Variable(tf.ones([L])/10)
W2 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
B2 = tf.Variable(tf.ones([M])/10)
W3 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
B3 = tf.Variable(tf.ones([N])/10)
W4 = tf.Variable(tf.truncated_normal([N, O], stddev=0.1))
B4 = tf.Variable(tf.ones([O])/10)
W5 = tf.Variable(tf.truncated_normal([O, 10], stddev=0.1))
B5 = tf.Variable(tf.zeros([10]))

XX = tf.reshape(X, [-1, 28*28])

# Four ReLU hidden layers, each followed by dropout with keep-prob `pkeep`.
Y1 = tf.nn.relu(tf.matmul(XX, W1) + B1)
Y1d = tf.nn.dropout(Y1, pkeep)

Y2 = tf.nn.relu(tf.matmul(Y1d, W2) + B2)
Y2d = tf.nn.dropout(Y2, pkeep)

Y3 = tf.nn.relu(tf.matmul(Y2d, W3) + B3)
Y3d = tf.nn.dropout(Y3, pkeep)

Y4 = tf.nn.relu(tf.matmul(Y3d, W4) + B4)
Y4d = tf.nn.dropout(Y4, pkeep)

Ylogits = tf.matmul(Y4d, W5) + B5
Y = tf.nn.softmax(Ylogits)

# Cross-entropy from raw logits; x100 only rescales the summary curve.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100

correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

# TensorBoard summaries.
tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()

# NOTE(review): removed a redundant `init = tf.global_variables_initializer();
# sess = tf.Session(); sess.run(init)` block here -- it leaked an unclosed
# Session and its initialization is redone by the `with tf.Session()` block
# below.


with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logs_path,
                                   graph=tf.get_default_graph())
    # Learning-rate decay constants (hoisted out of the loop: invariant).
    max_learning_rate = 0.003
    min_learning_rate = 0.0001
    decay_speed = 2000
    for epoch in range(training_epochs):
        batch_count = int(mnist.train.num_examples/batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Exponential decay of the learning rate within each epoch.
            learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i/decay_speed)
            # Train with dropout enabled: keep 75% of activations.
            _, summary = sess.run([train_step, summary_op], {X: batch_x, Y_: batch_y, pkeep: 0.75, lr: learning_rate})
            writer.add_summary(summary,
                               epoch * batch_count + i)
        print ("Epoch: ", epoch)

    # BUG FIX: evaluate with dropout DISABLED (pkeep=1.0).  The original fed
    # pkeep=0.75 at test time, which randomly zeroes activations and yields
    # a noisy, pessimistic accuracy estimate.
    print ("Accuracy: ", accuracy.eval(
        feed_dict={X: mnist.test.images, Y_: mnist.test.labels, pkeep: 1.0}))
    print ("done")



================================================
FILE: Chapter03/Python 3.5/five_layers_sigmoid_1.py
================================================
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import math

logs_path = 'log_simple_stats_5_layers_sigmoid'
batch_size = 100
# NOTE(review): this value is dead -- it is overwritten with 0.003 just
# before the optimizer is built below.
learning_rate = 0.5
training_epochs = 10

mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
# Flattened 28x28 images and one-hot 10-class labels.
X = tf.placeholder(tf.float32, [None, 784])
Y_ = tf.placeholder(tf.float32, [None, 10])

# Hidden-layer widths, shrinking toward the 10-class output.
L = 200
M = 100
N = 60
O = 30

W1 = tf.Variable(tf.truncated_normal([784, L], stddev=0.1))
B1 = tf.Variable(tf.zeros([L]))
W2 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
B2 = tf.Variable(tf.zeros([M]))
W3 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
B3 = tf.Variable(tf.zeros([N]))
W4 = tf.Variable(tf.truncated_normal([N, O], stddev=0.1))
B4 = tf.Variable(tf.zeros([O]))
W5 = tf.Variable(tf.truncated_normal([O, 10], stddev=0.1))
B5 = tf.Variable(tf.zeros([10]))


# Four sigmoid hidden layers; the last matmul produces raw logits.
XX = tf.reshape(X, [-1, 784])
Y1 = tf.nn.sigmoid(tf.matmul(XX, W1) + B1)
Y2 = tf.nn.sigmoid(tf.matmul(Y1, W2) + B2)
Y3 = tf.nn.sigmoid(tf.matmul(Y2, W3) + B3)
Y4 = tf.nn.sigmoid(tf.matmul(Y3, W4) + B4)
Ylogits = tf.matmul(Y4, W5) + B5
Y = tf.nn.softmax(Ylogits)


# Cross-entropy from raw logits; x100 only rescales the summary curve.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
learning_rate = 0.003
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()



# NOTE(review): removed a redundant `init = tf.global_variables_initializer();
# sess = tf.Session(); sess.run(init)` block here -- it leaked an unclosed
# Session and its work is redone inside the `with tf.Session()` block below.

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())

    # Train for `training_epochs` full passes over the training set.
    batches_per_epoch = int(mnist.train.num_examples / batch_size)
    for epoch in range(batches_per_epoch and training_epochs):
        for step in range(batches_per_epoch):
            xs, ys = mnist.train.next_batch(batch_size)
            _, summary = sess.run([train_step, summary_op],
                                  feed_dict={X: xs, Y_: ys})
            writer.add_summary(summary, epoch * batches_per_epoch + step)
        print("Epoch: ", epoch)

    # Final accuracy on the held-out test set.
    print("Accuracy: ", accuracy.eval(feed_dict={X: mnist.test.images, Y_: mnist.test.labels}))
    print("done")



================================================
FILE: Chapter03/Python 3.5/softmax_classifier_1.py
================================================
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
from random import randint
import numpy as np

logs_path = 'log_mnist_softmax'
batch_size = 100
learning_rate = 0.5
training_epochs = 10
mnist = input_data.read_data_sets("data", one_hot=True)

# Single-layer softmax classifier over flattened 28x28 MNIST images.
X = tf.placeholder(tf.float32, [None, 784], name="input")
Y_ = tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
XX = tf.reshape(X, [-1, 784])


# Raw class scores; softmax is applied separately for prediction so the
# loss can consume the unscaled logits.
Ylogits = tf.matmul(XX, W) + b
Y = tf.nn.softmax(Ylogits, name="output")
# BUG FIX: softmax_cross_entropy_with_logits expects UNSCALED logits.  The
# original passed the softmax output Y, applying softmax twice, which
# flattens the gradients and degrades training.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=Ylogits))
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

train_step = tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy)

# TensorBoard summaries.
tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logs_path,
                                   graph=tf.get_default_graph())
    for epoch in range(training_epochs):
        batch_count = int(mnist.train.num_examples/batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            _, summary = sess.run([train_step, summary_op],
                                  feed_dict={X: batch_x,
                                             Y_: batch_y})
            writer.add_summary(summary, epoch * batch_count + i)
        print("Epoch: ", epoch)

    print("Accuracy: ", accuracy.eval(feed_dict={X: mnist.test.images, Y_: mnist.test.labels}))
    print("done")

    # BUG FIX: randint's upper bound is INCLUSIVE, so randint(0, n) could
    # return n and index one past the end of the test set.
    num = randint(0, mnist.test.images.shape[0] - 1)
    img = mnist.test.images[num]

    # X expects a batch, so wrap the single image in a list.
    classification = sess.run(tf.argmax(Y, 1), feed_dict={X: [img]})
    print('Neural Network predicted', classification[0])
    print('Real label is:', np.argmax(mnist.test.labels[num]))

    # Persist the trained model for softmax_model_loader_1.py.
    saver = tf.train.Saver()
    save_path = saver.save(sess, "data/saved_mnist_cnn.ckpt")
    print("Model saved to %s" % save_path)




================================================
FILE: Chapter03/Python 3.5/softmax_model_loader_1.py
================================================
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from random import randint
from tensorflow.examples.tutorials.mnist import input_data

# Restore the model saved by softmax_model_saver_1.py and classify one
# randomly-chosen test image.
mnist = input_data.read_data_sets('data', one_hot=True)
sess = tf.InteractiveSession()
new_saver = tf.train.import_meta_graph('data/saved_mnist_cnn.ckpt.meta')
new_saver.restore(sess, 'data/saved_mnist_cnn.ckpt')
tf.get_default_graph().as_graph_def()

# Look up the placeholder and prediction tensors by the names given at save time.
x = sess.graph.get_tensor_by_name("input:0")
y_conv = sess.graph.get_tensor_by_name("output:0")

# BUG FIX: randint's upper bound is inclusive -- use shape[0]-1 to avoid a
# possible IndexError.
num = randint(0, mnist.test.images.shape[0] - 1)
img = mnist.test.images[num]

# BUG FIX: `x` has shape [None, 784], so a single image must be wrapped in a
# batch; also fetch only the prediction tensor (the original ran the input
# tensor too and then took argmax over the mixed result list).
result = sess.run(y_conv, feed_dict={x: [img]})
print(result)
print(sess.run(tf.argmax(result, 1)))

# BUG FIX: the original referenced an undefined `image_b` (NameError);
# show the image that was actually classified.
plt.imshow(img.reshape([28, 28]), cmap='Greys')
plt.show()







================================================
FILE: Chapter03/Python 3.5/softmax_model_saver_1.py
================================================
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
from random import randint
import numpy as np

logs_path = 'log_mnist_softmax'
batch_size = 100
learning_rate = 0.5
training_epochs = 10
mnist = input_data.read_data_sets("data", one_hot=True)

# Single-layer softmax classifier; input/output tensors are named so the
# loader script can find them after restoring the checkpoint.
X = tf.placeholder(tf.float32, [None, 784], name="input")
Y_ = tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
XX = tf.reshape(X, [-1, 784])

# Raw logits.  CONSISTENCY FIX: use XX (the reshaped input) as elsewhere in
# this chapter, instead of X directly.
Ylogits = tf.matmul(XX, W) + b
Y = tf.nn.softmax(Ylogits, name="output")
# BUG FIX: softmax_cross_entropy_with_logits expects UNSCALED logits; the
# original fed the softmax output Y, applying softmax twice.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_, logits=Ylogits))
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

train_step = tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy)

# TensorBoard summaries.
tf.summary.scalar("cost", cross_entropy)
tf.summary.scalar("accuracy", accuracy)
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logs_path, \
                                   graph=tf.get_default_graph())
    for epoch in range(training_epochs):
        batch_count = int(mnist.train.num_examples / batch_size)
        for i in range(batch_count):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            _, summary = sess.run([train_step, summary_op], \
                                  feed_dict={X: batch_x, \
                                             Y_: batch_y})
            writer.add_summary(summary, epoch * batch_count + i)
        print("Epoch: ", epoch)

    print("Accuracy: ", accuracy.eval(feed_dict={X: mnist.test.images, Y_: mnist.test.labels}))
    print("done")

    # BUG FIX: randint's upper bound is INCLUSIVE; randint(0, n) could return
    # n and index out of range -- use n-1.
    num = randint(0, mnist.test.images.shape[0] - 1)
    img = mnist.test.images[num]

    classification = sess.run(tf.argmax(Y, 1), feed_dict={X: [img]})
    print('Neural Network predicted', classification[0])
    print('Real label is:', np.argmax(mnist.test.labels[num]))

    # Persist the trained model for softmax_model_loader_1.py.
    saver = tf.train.Saver()
    save_path = saver.save(sess, "data/saved_mnist_cnn.ckpt")
    print("Model saved to %s" % save_path)




================================================
FILE: Chapter04/EMOTION_CNN/EmotionDetector/test.csv
================================================
[File too large to display: 10.3 MB]

================================================
FILE: Chapter04/EMOTION_CNN/EmotionDetector/train.csv
================================================
[File too large to display: 33.7 MB]

================================================
FILE: Chapter04/EMOTION_CNN/Python 2.7/EmotionDetectorUtils.py
================================================
import pandas as pd
import numpy as np
import os, sys, inspect
from six.moves import cPickle as pickle
import scipy.misc as misc

# Dataset geometry: 48x48 grayscale faces, 7 emotion classes.
IMAGE_SIZE = 48
NUM_LABELS = 7
VALIDATION_PERCENT = 0.1  # use 10 percent of training images for validation

IMAGE_LOCATION_NORM = IMAGE_SIZE / 2  # Python 2 integer division -> 24

# Fixed seed so the train/validation shuffle in read_data is reproducible.
np.random.seed(0)

# Class index -> human-readable emotion name.
emotion = {0:'anger', 1:'disgust',\
           2:'fear',3:'happy',\
           4:'sad',5:'surprise',6:'neutral'}

class testResult:
    """Tallies how often each of the seven emotion labels was predicted."""

    # label index -> counter attribute name (dataset ordering)
    _COUNTER_FOR_LABEL = {0: 'anger', 1: 'disgust', 2: 'fear', 3: 'happy',
                          4: 'sad', 5: 'surprise', 6: 'neutral'}

    def __init__(self):
        # One integer counter per emotion class, all starting at zero.
        for attr in self._COUNTER_FOR_LABEL.values():
            setattr(self, attr, 0)

    def evaluate(self, label):
        """Increment the counter for `label` (0-6); other values are ignored."""
        attr = self._COUNTER_FOR_LABEL.get(label)
        if attr is not None:
            setattr(self, attr, getattr(self, attr) + 1)

    def display_result(self, evaluations):
        """Print each counter as a percentage of `evaluations` total runs."""
        total = float(evaluations)
        for index in range(7):
            attr = self._COUNTER_FOR_LABEL[index]
            print(attr + " = " + str((getattr(self, attr) / total) * 100) + "%")
            

def read_data(data_dir, force=False):
    """Load the emotion dataset, caching a pickled copy in `data_dir`.

    On the first call (or when force=True) this reads train.csv/test.csv,
    normalizes pixel strings to [0, 1] arrays, one-hot encodes the labels,
    shuffles, and carves off VALIDATION_PERCENT of the training set; the
    result is pickled so later calls just unpickle it.

    Returns:
        (train_images, train_labels, validation_images, validation_labels,
         test_images) as numpy arrays.
    """
    def create_onehot_label(x):
        # Row vector with a 1 at the integer class index.
        label = np.zeros((1, NUM_LABELS), dtype=np.float32)
        label[:, int(x)] = 1
        return label

    pickle_file = os.path.join(data_dir, "EmotionDetectorData.pickle")
    if force or not os.path.exists(pickle_file):
        train_filename = os.path.join(data_dir, "train.csv")
        data_frame = pd.read_csv(train_filename)
        # The Pixels column is a space-separated string of 48*48 gray values.
        data_frame['Pixels'] = data_frame['Pixels'].apply(lambda x: np.fromstring(x, sep=" ") / 255.0)
        data_frame = data_frame.dropna()
        print("Reading train.csv ...")

        train_images = np.vstack(data_frame['Pixels']).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 1)
        print(train_images.shape)
        # NOTE: relies on Python 2 `map` returning a list.
        train_labels = np.array([map(create_onehot_label, data_frame['Emotion'].values)]).reshape(-1, NUM_LABELS)
        print(train_labels.shape)

        # Shuffle once, then take the validation split off the front.
        permutations = np.random.permutation(train_images.shape[0])
        train_images = train_images[permutations]
        train_labels = train_labels[permutations]
        validation_percent = int(train_images.shape[0] * VALIDATION_PERCENT)
        validation_images = train_images[:validation_percent]
        validation_labels = train_labels[:validation_percent]
        train_images = train_images[validation_percent:]
        train_labels = train_labels[validation_percent:]

        print("Reading test.csv ...")
        test_filename = os.path.join(data_dir, "test.csv")
        data_frame = pd.read_csv(test_filename)
        data_frame['Pixels'] = data_frame['Pixels'].apply(lambda x: np.fromstring(x, sep=" ") / 255.0)
        data_frame = data_frame.dropna()
        test_images = np.vstack(data_frame['Pixels']).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 1)

        # `pickle_out` instead of `file`: don't shadow the builtin.
        with open(pickle_file, "wb") as pickle_out:
            try:
                print('Picking ...')
                save = {
                    "train_images": train_images,
                    "train_labels": train_labels,
                    "validation_images": validation_images,
                    "validation_labels": validation_labels,
                    "test_images": test_images,
                }
                pickle.dump(save, pickle_out, pickle.HIGHEST_PROTOCOL)

            # BUG FIX: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit and hid the failure reason.
            except Exception as e:
                print("Unable to pickle file :/ (%s)" % e)

    with open(pickle_file, "rb") as pickle_in:
        save = pickle.load(pickle_in)
        train_images = save["train_images"]
        train_labels = save["train_labels"]
        validation_images = save["validation_images"]
        validation_labels = save["validation_labels"]
        test_images = save["test_images"]

    return train_images, train_labels, validation_images, validation_labels, test_images


================================================
FILE: Chapter04/EMOTION_CNN/Python 2.7/EmotionDetector_1.py
================================================
import tensorflow as tf
import numpy as np
#import os, sys, inspect
from datetime import datetime
import EmotionDetectorUtils

"""
lib_path = os.path.realpath(
    os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if lib_path not in sys.path:
    sys.path.insert(0, lib_path)
"""


# Command-line flags: dataset location, log/checkpoint directory, run mode.
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("data_dir", "EmotionDetector/", "Path to data files")
tf.flags.DEFINE_string("logs_dir", "logs/EmotionDetector_logs/", "Path to where log files are to be saved")
tf.flags.DEFINE_string("mode", "train", "mode: train (Default)/ test")

# Training hyper-parameters.
BATCH_SIZE = 128
LEARNING_RATE = 1e-3
MAX_ITERATIONS = 1001
REGULARIZATION = 1e-2  # weight of the L2 penalty in the total loss
IMAGE_SIZE = 48
NUM_LABELS = 7
VALIDATION_PERCENT = 0.1


def add_to_regularization_loss(W, b):
    """Accumulate L2 penalties for the weight and bias into "losses"."""
    for tensor in (W, b):
        tf.add_to_collection("losses", tf.nn.l2_loss(tensor))

def weight_variable(shape, stddev=0.02, name=None):
    """Create a truncated-normal weight tensor.

    Anonymous (name=None) -> plain tf.Variable; named -> tf.get_variable,
    which participates in variable scoping/reuse.
    """
    initial = tf.truncated_normal(shape, stddev=stddev)
    return tf.Variable(initial) if name is None else tf.get_variable(name, initializer=initial)


def bias_variable(shape, name=None):
    """Create a zero-initialized bias tensor (named -> tf.get_variable)."""
    initial = tf.constant(0.0, shape=shape)
    return tf.Variable(initial) if name is None else tf.get_variable(name, initializer=initial)

def conv2d_basic(x, W, bias):
    """Stride-1, SAME-padded 2-D convolution followed by a bias add."""
    convolved = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
    return tf.nn.bias_add(convolved, bias)

def max_pool_2x2(x):
    """2x2 max-pooling with stride 2; halves each spatial dimension."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding="SAME")


def emotion_cnn(dataset):
    """Two conv/pool stages plus two fully-connected layers over 48x48x1 input.

    Uses the module-level `weights`/`biases` dicts and returns raw class
    logits (softmax is applied by the caller).
    """
    with tf.name_scope("conv1") as scope:
        tf.summary.histogram("W_conv1", weights['wc1'])
        tf.summary.histogram("b_conv1", biases['bc1'])
        conv_1 = tf.nn.conv2d(dataset, weights['wc1'],\
                              strides=[1, 1, 1, 1], padding="SAME")
        h_conv1 = tf.nn.bias_add(conv_1, biases['bc1'])
        h_1 = tf.nn.relu(h_conv1)
        h_pool1 = max_pool_2x2(h_1)
        add_to_regularization_loss(weights['wc1'], biases['bc1'])

    with tf.name_scope("conv2") as scope:
        tf.summary.histogram("W_conv2", weights['wc2'])
        tf.summary.histogram("b_conv2", biases['bc2'])
        conv_2 = tf.nn.conv2d(h_pool1, weights['wc2'], strides=[1, 1, 1, 1], padding="SAME")
        h_conv2 = tf.nn.bias_add(conv_2, biases['bc2'])
        h_2 = tf.nn.relu(h_conv2)
        h_pool2 = max_pool_2x2(h_2)
        add_to_regularization_loss(weights['wc2'], biases['bc2'])

    with tf.name_scope("fc_1") as scope:
        # NOTE(review): the keep-probability is hard-coded, so dropout is also
        # active at inference time -- confirm that is intended.
        prob = 0.5
        # Two 2x2 poolings shrink 48 -> 12 (Python 2 integer division).
        image_size = IMAGE_SIZE / 4
        h_flat = tf.reshape(h_pool2, [-1, image_size * image_size * 64])
        tf.summary.histogram("W_fc1", weights['wf1'])
        tf.summary.histogram("b_fc1", biases['bf1'])
        h_fc1 = tf.nn.relu(tf.matmul(h_flat, weights['wf1']) + biases['bf1'])
        h_fc1_dropout = tf.nn.dropout(h_fc1, prob)

    with tf.name_scope("fc_2") as scope:
        tf.summary.histogram("W_fc2", weights['wf2'])
        tf.summary.histogram("b_fc2", biases['bf2'])
        # Raw logits for the 7 emotion classes.
        pred = tf.matmul(h_fc1_dropout, weights['wf2']) + biases['bf2']

    return pred

# Shared trainable parameters used by emotion_cnn.  NOTE: the wf1 shape
# relies on Python 2 integer division (IMAGE_SIZE / 4 == 12).
weights = {
    'wc1': weight_variable([5, 5, 1, 32], name="W_conv1"),
    'wc2': weight_variable([3, 3, 32, 64],name="W_conv2"),
    'wf1': weight_variable([(IMAGE_SIZE / 4) * (IMAGE_SIZE / 4) * 64, 256],name="W_fc1"),
    'wf2': weight_variable([256, NUM_LABELS], name="W_fc2")
}

biases = {
    'bc1': bias_variable([32], name="b_conv1"),
    'bc2': bias_variable([64], name="b_conv2"),
    'bf1': bias_variable([256], name="b_fc1"),
    'bf2': bias_variable([NUM_LABELS], name="b_fc2")
}

def loss(pred, label):
    """Mean cross-entropy over logits plus the weighted L2 regularization."""
    xent = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=label))
    tf.summary.scalar('Entropy', xent)
    reg = tf.add_n(tf.get_collection("losses"))
    tf.summary.scalar('Reg_loss', reg)
    return xent + REGULARIZATION * reg


def train(loss, step):
    """Build the Adam training op; `step` is incremented on each update."""
    return tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss, global_step=step)


def get_next_batch(images, labels, step):
    """Return the `step`-th mini-batch, cycling through the dataset.

    The start offset wraps modulo (num_examples - BATCH_SIZE) so a full
    batch of BATCH_SIZE items is always available.
    """
    start = (step * BATCH_SIZE) % (images.shape[0] - BATCH_SIZE)
    stop = start + BATCH_SIZE
    return images[start:stop], labels[start:stop]


def main(argv=None):
    """Train the emotion CNN, logging summaries and periodic checkpoints."""
    train_images, train_labels, valid_images, valid_labels, test_images = EmotionDetectorUtils.read_data(FLAGS.data_dir)
    print("Train size: %s" % train_images.shape[0])
    print('Validation size: %s' % valid_images.shape[0])
    print("Test size: %s" % test_images.shape[0])

    global_step = tf.Variable(0, trainable=False)
    # NOTE(review): dropout_prob is created but never fed or used -- the
    # dropout rate is hard-coded inside emotion_cnn.
    dropout_prob = tf.placeholder(tf.float32)
    input_dataset = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 1],name="input")
    input_labels = tf.placeholder(tf.float32, [None, NUM_LABELS])

    # Logits, named softmax output (for later restore-by-name), loss, train op.
    pred = emotion_cnn(input_dataset)
    output_pred = tf.nn.softmax(pred,name="output")
    loss_val = loss(pred, input_labels)
    train_op = train(loss_val, global_step)

    summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        summary_writer = tf.summary.FileWriter(FLAGS.logs_dir, sess.graph_def)
        saver = tf.train.Saver()
        # Resume from the latest checkpoint if one exists in logs_dir.
        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model Restored!")

        for step in range(MAX_ITERATIONS):
            batch_image, batch_label = get_next_batch(train_images, train_labels, step)
            feed_dict = {input_dataset: batch_image, input_labels: batch_label}

            sess.run(train_op, feed_dict=feed_dict)
            # Log training loss every 10 steps ...
            if step % 10 == 0:
                train_loss, summary_str = sess.run([loss_val, summary_op], feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, global_step=step)
                print("Training Loss: %f" % train_loss)

            # ... and validate + checkpoint every 100 steps.
            if step % 100 == 0:
                valid_loss = sess.run(loss_val, feed_dict={input_dataset: valid_images, input_labels: valid_labels})
                print("%s Validation Loss: %f" % (datetime.now(), valid_loss))
                saver.save(sess, FLAGS.logs_dir + 'model.ckpt', global_step=step)


if __name__ == "__main__":
    tf.app.run()



"""
>>> 
Train size: 3761
Validation size: 417
Test size: 1312
WARNING:tensorflow:Passing a `GraphDef` to the SummaryWriter is deprecated. Pass a `Graph` object instead, such as `sess.graph`.
Training Loss: 1.962236
2016-11-05 22:39:36.645682 Validation Loss: 1.962719
Training Loss: 1.907290
Training Loss: 1.849100
Training Loss: 1.871116
Training Loss: 1.798998
Training Loss: 1.885601
Training Loss: 1.849380
Training Loss: 1.843139
Training Loss: 1.933691
Training Loss: 1.829839
Training Loss: 1.839772
2016-11-05 22:42:58.951699 Validation Loss: 1.822431
Training Loss: 1.772197
Training Loss: 1.666473
Training Loss: 1.620869
Training Loss: 1.592660
Training Loss: 1.422701
Training Loss: 1.436721
Training Loss: 1.348217
Training Loss: 1.432023
Training Loss: 1.347753
Training Loss: 1.299889
2016-11-05 22:46:55.144483 Validation Loss: 1.335237
Training Loss: 1.108747
Training Loss: 1.197601
Training Loss: 1.245860
Training Loss: 1.164120
Training Loss: 0.994351
Training Loss: 1.072356
Training Loss: 1.193485
Training Loss: 1.118093
Training Loss: 1.021220
Training Loss: 1.069752
2016-11-05 22:50:17.677074 Validation Loss: 1.111559
Training Loss: 1.099430
Training Loss: 0.966327
Training Loss: 0.960916
Training Loss: 0.844742
Training Loss: 0.979741
Training Loss: 0.891897
Training Loss: 1.013132
Training Loss: 0.936738
Training Loss: 0.911577
Training Loss: 0.862605
2016-11-05 22:53:30.999141 Validation Loss: 0.999061
Training Loss: 0.800337
Training Loss: 0.776097
Training Loss: 0.799260
Training Loss: 0.919926
Training Loss: 0.758807
Training Loss: 0.807968
Training Loss: 0.856378
Training Loss: 0.867762
Training Loss: 0.656170
Training Loss: 0.688761
2016-11-05 22:56:53.256991 Validation Loss: 0.931223
Training Loss: 0.696454
Training Loss: 0.725157
Training Loss: 0.674037
Training Loss: 0.719200
Training Loss: 0.749460
Training Loss: 0.741768
Training Loss: 0.702719
Training Loss: 0.734194
Training Loss: 0.669155
Training Loss: 0.641528
2016-11-05 23:00:06.530139 Validation Loss: 0.911489
Training Loss: 0.764550
Training Loss: 0.646964
Training Loss: 0.724712
Training Loss: 0.726692
Training Loss: 0.656019
Training Loss: 0.690552
Training Loss: 0.537638
Training Loss: 0.680097
Training Loss: 0.554115
Training Loss: 0.590837
2016-11-05 23:03:15.351156 Validation Loss: 0.818303
Training Loss: 0.656608
Training Loss: 0.567394
Training Loss: 0.545324
Training Loss: 0.611726
Training Loss: 0.600910
Training Loss: 0.526467
Training Loss: 0.584986
Training Loss: 0.567015
Training Loss: 0.555465
Training Loss: 0.630097
2016-11-05 23:06:26.575298 Validation Loss: 0.824178
Training Loss: 0.662920
Training Loss: 0.512493
Training Loss: 0.475912
Training Loss: 0.455112
Training Loss: 0.567875
Training Loss: 0.582927
Training Loss: 0.509225
Training Loss: 0.602916
Training Loss: 0.521976
Training Loss: 0.445122
2016-11-05 23:09:40.136353 Validation Loss: 0.803449
Training Loss: 0.435535
Training Loss: 0.459343
Training Loss: 0.481706
Training Loss: 0.460640
Training Loss: 0.554570
Training Loss: 0.427962
Training Loss: 0.512764
Training Loss: 0.531128
Training Loss: 0.364465
Training Loss: 0.432366
2016-11-05 23:12:50.769527 Validation Loss: 0.851074
>>> 
"""


================================================
FILE: Chapter04/EMOTION_CNN/Python 2.7/test_your_image.py
================================================
from scipy import misc
import numpy as np
import matplotlib.cm as cm
import tensorflow as tf
import os, sys, inspect
from datetime import datetime
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
from scipy import misc
import EmotionDetectorUtils
from EmotionDetectorUtils import testResult

# Class index -> emotion name (same mapping as EmotionDetectorUtils.emotion).
emotion = {0:'anger', 1:'disgust',\
           2:'fear',3:'happy',\
           4:'sad',5:'surprise',6:'neutral'}


def rgb2gray(rgb):
    """Convert an RGB image array (H, W, 3+) to grayscale (H, W).

    Uses the ITU-R BT.601 luma weights; any channels beyond the first
    three (e.g. alpha) are ignored.
    """
    luma_weights = [0.299, 0.587, 0.114]
    return np.dot(rgb[..., :3], luma_weights)

# Load the sample photo, convert it to grayscale, and show it for a visual check.
img = mpimg.imread('author_img.jpg')
gray = rgb2gray(img)
plt.imshow(gray, cmap = plt.get_cmap('gray'))
plt.show()


""""
lib_path = os.path.realpath(
    os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if lib_path not in sys.path:
    sys.path.insert(0, lib_path)
"""



# Command-line flags matching EmotionDetector_1.py.
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("data_dir", "EmotionDetector/", "Path to data files")
tf.flags.DEFINE_string("logs_dir", "logs/EmotionDetector_logs/", "Path to where log files are to be saved")
tf.flags.DEFINE_string("mode", "train", "mode: train (Default)/ test")




# NOTE(review): the loaded arrays are never used below; the call mainly
# ensures the dataset pickle cache exists.
train_images, train_labels, valid_images, valid_labels, test_images = \
                  EmotionDetectorUtils.read_data(FLAGS.data_dir)


sess = tf.InteractiveSession()

# Restore the trained emotion-CNN graph and its weights from the checkpoint.
new_saver = tf.train.import_meta_graph('logs/EmotionDetector_logs/model.ckpt-1000.meta')
new_saver.restore(sess, 'logs/EmotionDetector_logs/model.ckpt-1000')
tf.get_default_graph().as_graph_def()

# Tensors named at training time: input placeholder and softmax output.
x = sess.graph.get_tensor_by_name("input:0")
y_conv = sess.graph.get_tensor_by_name("output:0")

# Reshape the grayscale image into a single-item NHWC batch.
image_0 = np.resize(gray, (1, 48, 48, 1))
tResult = testResult()
num_evaluations = 1000

# NOTE(review): the training graph applies dropout unconditionally (see
# EmotionDetector_1.py), so repeated runs on the same image can differ --
# hence the tally over many evaluations.
for _ in range(num_evaluations):
    scores = sess.run(y_conv, feed_dict={x: image_0})
    predicted = sess.run(tf.argmax(scores, 1))
    tResult.evaluate(int(predicted[0]))
tResult.display_result(num_evaluations)






================================================
FILE: Chapter04/EMOTION_CNN/Python 3.5/EmotionDetectorUtils.py
================================================
import pandas as pd
import numpy as np
import os, sys, inspect
from six.moves import cPickle as pickle
import scipy.misc as misc

# Dataset geometry: 48x48 grayscale faces, 7 emotion classes.
IMAGE_SIZE = 48
NUM_LABELS = 7
VALIDATION_PERCENT = 0.1  # use 10 percent of training images for validation

IMAGE_LOCATION_NORM = IMAGE_SIZE // 2  # floor division -> 24

# Fixed seed so the train/validation shuffle in read_data is reproducible.
np.random.seed(0)

# Class index -> human-readable emotion name.
emotion = {0:'anger', 1:'disgust',\
           2:'fear',3:'happy',\
           4:'sad',5:'surprise',6:'neutral'}

class testResult:
    """Tallies how often each of the seven emotion labels was predicted."""

    # label index -> counter attribute name (dataset ordering)
    _COUNTER_FOR_LABEL = {0: 'anger', 1: 'disgust', 2: 'fear', 3: 'happy',
                          4: 'sad', 5: 'surprise', 6: 'neutral'}

    def __init__(self):
        # One integer counter per emotion class, all starting at zero.
        for attr in self._COUNTER_FOR_LABEL.values():
            setattr(self, attr, 0)

    def evaluate(self, label):
        """Increment the counter for `label` (0-6); other values are ignored."""
        attr = self._COUNTER_FOR_LABEL.get(label)
        if attr is not None:
            setattr(self, attr, getattr(self, attr) + 1)

    def display_result(self, evaluations):
        """Print each counter as a percentage of `evaluations` total runs."""
        total = float(evaluations)
        for index in range(7):
            attr = self._COUNTER_FOR_LABEL[index]
            print(attr + " = " + str((getattr(self, attr) / total) * 100) + "%")
            

def read_data(data_dir, force=False):
    """Load the emotion dataset, caching a pickled copy in `data_dir`.

    On the first call (or when force=True) this reads train.csv/test.csv,
    normalizes pixel strings to [0, 1] arrays, one-hot encodes the labels,
    shuffles, and carves off VALIDATION_PERCENT of the training set; the
    result is pickled so later calls just unpickle it.

    Returns:
        (train_images, train_labels, validation_images, validation_labels,
         test_images) as numpy arrays.
    """
    def create_onehot_label(x):
        # Row vector with a 1 at the integer class index.
        label = np.zeros((1, NUM_LABELS), dtype=np.float32)
        label[:, int(x)] = 1
        return label

    pickle_file = os.path.join(data_dir, "EmotionDetectorData.pickle")
    if force or not os.path.exists(pickle_file):
        train_filename = os.path.join(data_dir, "train.csv")
        data_frame = pd.read_csv(train_filename)
        # The Pixels column is a space-separated string of 48*48 gray values.
        data_frame['Pixels'] = data_frame['Pixels'].apply(lambda x: np.fromstring(x, sep=" ") / 255.0)
        data_frame = data_frame.dropna()
        print("Reading train.csv ...")

        train_images = np.vstack(data_frame['Pixels']).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 1)
        print(train_images.shape)
        train_labels = np.array(list(map(create_onehot_label, data_frame['Emotion'].values))).reshape(-1, NUM_LABELS)
        print(train_labels.shape)

        # Shuffle once, then take the validation split off the front.
        permutations = np.random.permutation(train_images.shape[0])
        train_images = train_images[permutations]
        train_labels = train_labels[permutations]
        validation_percent = int(train_images.shape[0] * VALIDATION_PERCENT)
        validation_images = train_images[:validation_percent]
        validation_labels = train_labels[:validation_percent]
        train_images = train_images[validation_percent:]
        train_labels = train_labels[validation_percent:]

        print("Reading test.csv ...")
        test_filename = os.path.join(data_dir, "test.csv")
        data_frame = pd.read_csv(test_filename)
        data_frame['Pixels'] = data_frame['Pixels'].apply(lambda x: np.fromstring(x, sep=" ") / 255.0)
        data_frame = data_frame.dropna()
        test_images = np.vstack(data_frame['Pixels']).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 1)

        # `pickle_out` instead of `file`: don't shadow the builtin.
        with open(pickle_file, "wb") as pickle_out:
            try:
                print('Picking ...')
                save = {
                    "train_images": train_images,
                    "train_labels": train_labels,
                    "validation_images": validation_images,
                    "validation_labels": validation_labels,
                    "test_images": test_images,
                }
                pickle.dump(save, pickle_out, pickle.HIGHEST_PROTOCOL)

            # BUG FIX: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit and hid the failure reason.
            except Exception as e:
                print("Unable to pickle file :/ (%s)" % e)

    with open(pickle_file, "rb") as pickle_in:
        save = pickle.load(pickle_in)
        train_images = save["train_images"]
        train_labels = save["train_labels"]
        validation_images = save["validation_images"]
        validation_labels = save["validation_labels"]
        test_images = save["test_images"]

    return train_images, train_labels, validation_images, validation_labels, test_images


================================================
FILE: Chapter04/EMOTION_CNN/Python 3.5/EmotionDetector_1.py
================================================
import tensorflow as tf
import numpy as np
#import os, sys, inspect
from datetime import datetime
import EmotionDetectorUtils

"""
lib_path = os.path.realpath(
    os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if lib_path not in sys.path:
    sys.path.insert(0, lib_path)
"""


# Command-line flags shared by the training and evaluation scripts.
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("data_dir", "EmotionDetector/", "Path to data files")
tf.flags.DEFINE_string("logs_dir", "logs/EmotionDetector_logs/", "Path to where log files are to be saved")
tf.flags.DEFINE_string("mode", "train", "mode: train (Default)/ test")

# Training hyper-parameters.
BATCH_SIZE = 128            # examples per gradient step
LEARNING_RATE = 1e-3        # Adam step size
MAX_ITERATIONS = 1001       # total training steps
REGULARIZATION = 1e-2       # weight on the L2 penalty collected in "losses"
IMAGE_SIZE = 48             # inputs are 48x48 single-channel images
NUM_LABELS = 7              # number of emotion classes
VALIDATION_PERCENT = 0.1    # fraction reserved for validation (not referenced in this file)


def add_to_regularization_loss(W, b):
    """Accumulate the L2 penalties of a weight/bias pair into the "losses" collection."""
    for var in (W, b):
        tf.add_to_collection("losses", tf.nn.l2_loss(var))

def weight_variable(shape, stddev=0.02, name=None):
    """Create a truncated-normal initialized weight tensor.

    Unnamed weights become plain Variables; named ones go through
    tf.get_variable so they participate in variable scoping.
    """
    init = tf.truncated_normal(shape, stddev=stddev)
    return tf.Variable(init) if name is None else tf.get_variable(name, initializer=init)


def bias_variable(shape, name=None):
    """Create a zero-initialized bias tensor (named vars use tf.get_variable)."""
    init = tf.constant(0.0, shape=shape)
    return tf.Variable(init) if name is None else tf.get_variable(name, initializer=init)

def conv2d_basic(x, W, bias):
    """Stride-1, SAME-padded 2-D convolution followed by a bias add."""
    return tf.nn.bias_add(
        tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME"), bias)

def max_pool_2x2(x):
    """Downsample by a factor of 2 in both spatial dimensions (SAME padding)."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding="SAME")


def emotion_cnn(dataset, keep_prob=0.5):
    """Build the emotion-classification CNN and return its class logits.

    Two conv/relu/max-pool blocks followed by two fully connected layers.
    Parameters come from the module-level `weights`/`biases` dicts; each
    conv layer's parameters are also added to the "losses" collection for
    L2 regularization.

    Args:
        dataset: float32 tensor of shape [batch, IMAGE_SIZE, IMAGE_SIZE, 1].
        keep_prob: dropout keep probability applied after the first FC
            layer. Defaults to 0.5, matching the previously hard-coded
            value; pass 1.0 to disable dropout at evaluation time.

    Returns:
        Unscaled logits of shape [batch, NUM_LABELS].
    """
    with tf.name_scope("conv1") as scope:
        tf.summary.histogram("W_conv1", weights['wc1'])
        tf.summary.histogram("b_conv1", biases['bc1'])
        conv_1 = tf.nn.conv2d(dataset, weights['wc1'],
                              strides=[1, 1, 1, 1], padding="SAME")
        h_conv1 = tf.nn.bias_add(conv_1, biases['bc1'])
        h_1 = tf.nn.relu(h_conv1)
        h_pool1 = max_pool_2x2(h_1)
        add_to_regularization_loss(weights['wc1'], biases['bc1'])

    with tf.name_scope("conv2") as scope:
        tf.summary.histogram("W_conv2", weights['wc2'])
        tf.summary.histogram("b_conv2", biases['bc2'])
        conv_2 = tf.nn.conv2d(h_pool1, weights['wc2'], strides=[1, 1, 1, 1], padding="SAME")
        h_conv2 = tf.nn.bias_add(conv_2, biases['bc2'])
        h_2 = tf.nn.relu(h_conv2)
        h_pool2 = max_pool_2x2(h_2)
        add_to_regularization_loss(weights['wc2'], biases['bc2'])

    with tf.name_scope("fc_1") as scope:
        # Two 2x2 pools shrink IMAGE_SIZE by 4 in each dimension.
        image_size = IMAGE_SIZE // 4
        h_flat = tf.reshape(h_pool2, [-1, image_size * image_size * 64])
        tf.summary.histogram("W_fc1", weights['wf1'])
        tf.summary.histogram("b_fc1", biases['bf1'])
        h_fc1 = tf.nn.relu(tf.matmul(h_flat, weights['wf1']) + biases['bf1'])
        # NOTE(review): dropout used to be unconditionally on at keep prob
        # 0.5 (even during validation); keep_prob makes it controllable
        # while preserving the old default behavior.
        h_fc1_dropout = tf.nn.dropout(h_fc1, keep_prob)

    with tf.name_scope("fc_2") as scope:
        tf.summary.histogram("W_fc2", weights['wf2'])
        tf.summary.histogram("b_fc2", biases['bf2'])
        pred = tf.matmul(h_fc1_dropout, weights['wf2']) + biases['bf2']

    return pred

# Model parameters, consumed by emotion_cnn() via module scope.
# Conv filter shapes are [height, width, in_channels, out_channels]; the
# first FC layer consumes the 12x12x64 feature map left after two 2x2 pools.
weights = {
    'wc1': weight_variable([5, 5, 1, 32], name="W_conv1"),
    'wc2': weight_variable([3, 3, 32, 64],name="W_conv2"),
    'wf1': weight_variable([(IMAGE_SIZE // 4) * (IMAGE_SIZE // 4) * 64, 256],name="W_fc1"),
    'wf2': weight_variable([256, NUM_LABELS], name="W_fc2")
}

# One bias vector per layer, sized to match the output depth of its weights.
biases = {
    'bc1': bias_variable([32], name="b_conv1"),
    'bc2': bias_variable([64], name="b_conv2"),
    'bf1': bias_variable([256], name="b_fc1"),
    'bf2': bias_variable([NUM_LABELS], name="b_fc2")
}

def loss(pred, label):
    """Total training loss: softmax cross-entropy plus weighted L2 penalty.

    The L2 terms are read from the "losses" collection populated while the
    network graph was built; both components are logged as scalar summaries.
    """
    xent = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=label))
    tf.summary.scalar('Entropy', xent)
    reg = tf.add_n(tf.get_collection("losses"))
    tf.summary.scalar('Reg_loss', reg)
    return xent + REGULARIZATION * reg


def train(loss, step):
    """Return an Adam update op that also increments the given global step."""
    optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
    return optimizer.minimize(loss, global_step=step)


def get_next_batch(images, labels, step):
    """Return the step-th mini-batch, cycling through the dataset.

    The offset wraps modulo (n - BATCH_SIZE) so every slice is full-sized.
    """
    start = (step * BATCH_SIZE) % (images.shape[0] - BATCH_SIZE)
    stop = start + BATCH_SIZE
    return images[start:stop], labels[start:stop]


def main(argv=None):
    """Train the emotion CNN, writing summaries and checkpoints to FLAGS.logs_dir."""
    train_images, train_labels, valid_images, valid_labels, test_images = EmotionDetectorUtils.read_data(FLAGS.data_dir)
    print("Train size: %s" % train_images.shape[0])
    print('Validation size: %s' % valid_images.shape[0])
    print("Test size: %s" % test_images.shape[0])

    global_step = tf.Variable(0, trainable=False)
    # NOTE(review): dropout_prob is created but never fed or referenced below.
    dropout_prob = tf.placeholder(tf.float32)
    # Placeholders are named so a separate script can fetch them by name
    # after restoring the graph from a checkpoint.
    input_dataset = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 1],name="input")
    input_labels = tf.placeholder(tf.float32, [None, NUM_LABELS])

    # Build logits, named softmax output, loss, and the training op.
    pred = emotion_cnn(input_dataset)
    output_pred = tf.nn.softmax(pred,name="output")
    loss_val = loss(pred, input_labels)
    train_op = train(loss_val, global_step)

    summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        summary_writer = tf.summary.FileWriter(FLAGS.logs_dir, sess.graph_def)
        saver = tf.train.Saver()
        # Resume from the latest checkpoint in the log directory, if present.
        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model Restored!")

        for step in range(MAX_ITERATIONS):
            batch_image, batch_label = get_next_batch(train_images, train_labels, step)
            feed_dict = {input_dataset: batch_image, input_labels: batch_label}

            sess.run(train_op, feed_dict=feed_dict)
            if step % 10 == 0:
                # Log the training loss and all summaries every 10 steps.
                train_loss, summary_str = sess.run([loss_val, summary_op], feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, global_step=step)
                print("Training Loss: %f" % train_loss)

            if step % 100 == 0:
                # Report validation loss and checkpoint every 100 steps.
                valid_loss = sess.run(loss_val, feed_dict={input_dataset: valid_images, input_labels: valid_labels})
                print("%s Validation Loss: %f" % (datetime.now(), valid_loss))
                saver.save(sess, FLAGS.logs_dir + 'model.ckpt', global_step=step)


# tf.app.run() parses the command-line flags and then invokes main().
if __name__ == "__main__":
    tf.app.run()



"""
>>> 
Train size: 3761
Validation size: 417
Test size: 1312
Training Loss: 1.951450
2017-07-27 14:26:41.689096 Validation Loss: 1.958948
Training Loss: 1.899691
Training Loss: 1.873583
Training Loss: 1.883454
Training Loss: 1.794849
Training Loss: 1.884183
Training Loss: 1.848423
Training Loss: 1.838916
Training Loss: 1.918565
Training Loss: 1.829074
Training Loss: 1.864008
2017-07-27 14:27:00.305351 Validation Loss: 1.790150
Training Loss: 1.753058
Training Loss: 1.615597
Training Loss: 1.571414
Training Loss: 1.623350
Training Loss: 1.494578
Training Loss: 1.502531
Training Loss: 1.349338
Training Loss: 1.537164
Training Loss: 1.364067
Training Loss: 1.387331
2017-07-27 14:27:20.328279 Validation Loss: 1.375231
Training Loss: 1.186529
Training Loss: 1.386529
Training Loss: 1.270537
Training Loss: 1.211034
Training Loss: 1.096524
Training Loss: 1.192567
Training Loss: 1.279141
Training Loss: 1.199098
Training Loss: 1.017902
Training Loss: 1.249009
2017-07-27 14:27:38.844167 Validation Loss: 1.178693
Training Loss: 1.222699
Training Loss: 0.970940
Training Loss: 1.012443
Training Loss: 0.931900
Training Loss: 1.016142
Training Loss: 0.943123
Training Loss: 1.099365
Training Loss: 1.000534
Training Loss: 0.925840
Training Loss: 0.895967
2017-07-27 14:27:57.399234 Validation Loss: 1.103102
Training Loss: 0.863209
Training Loss: 0.833549
Training Loss: 0.812724
Training Loss: 1.009514
Training Loss: 1.024465
Training Loss: 0.961753
Training Loss: 0.986352
Training Loss: 0.959654
Training Loss: 0.774006
Training Loss: 0.858462
2017-07-27 14:28:15.782431 Validation Loss: 1.000128
Training Loss: 0.663166
Training Loss: 0.785379
Training Loss: 0.821995
Training Loss: 0.945040
Training Loss: 0.909402
Training Loss: 0.797702
Training Loss: 0.769628
Training Loss: 0.750213
Training Loss: 0.722645
Training Loss: 0.800091
2017-07-27 14:28:34.632889 Validation Loss: 0.924810
Training Loss: 0.878261
Training Loss: 0.817574
Training Loss: 0.856897
Training Loss: 0.752512
Training Loss: 0.881165
Training Loss: 0.710394
Training Loss: 0.721797
Training Loss: 0.726897
Training Loss: 0.624348
Training Loss: 0.730256
2017-07-27 14:28:53.171239 Validation Loss: 0.901341
Training Loss: 0.685925
Training Loss: 0.630337
Training Loss: 0.656826
Training Loss: 0.666020
Training Loss: 0.627277
Training Loss: 0.698149
Training Loss: 0.722851
Training Loss: 0.722231
Training Loss: 0.701155
Training Loss: 0.684319
2017-07-27 14:29:11.596521 Validation Loss: 0.894154
Training Loss: 0.738686
Training Loss: 0.580629
Training Loss: 0.545667
Training Loss: 0.614124
Training Loss: 0.640999
Training Loss: 0.762669
Training Loss: 0.628534
Training Loss: 0.690788
Training Loss: 0.628837
Training Loss: 0.565587
2017-07-27 14:29:30.075707 Validation Loss: 0.825970
Training Loss: 0.551373
Training Loss: 0.466755
Training Loss: 0.583116
Training Loss: 0.644869
Training Loss: 0.626141
Training Loss: 0.609953
Training Loss: 0.622723
Training Loss: 0.696944
Training Loss: 0.543604
Training Loss: 0.436234
2017-07-27 14:29:48.517299 Validation Loss: 0.873586

>>> 
"""


================================================
FILE: Chapter04/EMOTION_CNN/Python 3.5/__init__.py
================================================


================================================
FILE: Chapter04/EMOTION_CNN/Python 3.5/test_your_image.py
================================================
"""Classify a single photo with a trained EmotionDetector checkpoint.

Loads an image, converts it to grayscale, restores the trained graph from
logs/EmotionDetector_logs, and runs the network repeatedly, tallying the
predicted labels with EmotionDetectorUtils.testResult.
"""
from scipy import misc
import numpy as np
import matplotlib.cm as cm
import tensorflow as tf
import os, sys, inspect
from datetime import datetime
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import EmotionDetectorUtils
from EmotionDetectorUtils import testResult

# Class index -> human-readable emotion label.
emotion = {0:'anger', 1:'disgust',\
           2:'fear',3:'happy',\
           4:'sad',5:'surprise',6:'neutral'}


def rgb2gray(rgb):
    """Convert an RGB image array to grayscale using ITU-R 601 luma weights."""
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])

# Load the probe image, convert to grayscale, and preview it.
img = mpimg.imread('author_img.jpg')
gray = rgb2gray(img)
plt.imshow(gray, cmap=plt.get_cmap('gray'))
plt.show()


# lib_path = os.path.realpath(
#     os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
# if lib_path not in sys.path:
#     sys.path.insert(0, lib_path)


FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("data_dir", "EmotionDetector/", "Path to data files")
tf.flags.DEFINE_string("logs_dir", "logs/EmotionDetector_logs/", "Path to where log files are to be saved")
tf.flags.DEFINE_string("mode", "train", "mode: train (Default)/ test")


train_images, train_labels, valid_images, valid_labels, test_images = \
                  EmotionDetectorUtils.read_data(FLAGS.data_dir)


sess = tf.InteractiveSession()

# Restore the trained graph and weights from the final training checkpoint.
new_saver = tf.train.import_meta_graph('logs/EmotionDetector_logs/model.ckpt-1000.meta')
new_saver.restore(sess, 'logs/EmotionDetector_logs/model.ckpt-1000')
tf.get_default_graph().as_graph_def()

# The training script named its placeholders "input" and "output".
x = sess.graph.get_tensor_by_name("input:0")
y_conv = sess.graph.get_tensor_by_name("output:0")

# The network expects a single 48x48x1 grayscale image.
image_0 = np.resize(gray, (1, 48, 48, 1))
tResult = testResult()
num_evaluations = 1000

for i in range(num_evaluations):
    result = sess.run(y_conv, feed_dict={x: image_0})
    # np.argmax on the fetched probabilities avoids adding a new tf.argmax
    # node to the graph on every iteration (the old code grew the graph
    # by one op per pass).
    label = int(np.argmax(result, 1)[0])
    tResult.evaluate(label)
tResult.display_result(num_evaluations)






================================================
FILE: Chapter04/MNIST_CNN/Python 2.7/mnist_cnn_1.py
================================================
import tensorflow as tf
import numpy as np
#import mnist_data 

batch_size = 128
test_size = 256
img_size = 28
num_classes = 10

def init_weights(shape):
    """Trainable weight tensor drawn from a small-stddev Gaussian."""
    initial = tf.random_normal(shape, stddev=0.01)
    return tf.Variable(initial)


def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
    """Three conv blocks, one hidden FC layer, and a linear output layer.

    Dropout with keep probability p_keep_conv follows each conv stage and
    the flattening step; p_keep_hidden follows the hidden FC layer. The
    returned tensor holds unscaled class logits.
    """
    stride1 = [1, 1, 1, 1]
    pool2 = [1, 2, 2, 1]

    # Conv block 1: conv -> relu -> 2x2 max-pool -> dropout
    net = tf.nn.conv2d(X, w, strides=stride1, padding='SAME')
    net = tf.nn.max_pool(tf.nn.relu(net), ksize=pool2, strides=pool2, padding='SAME')
    net = tf.nn.dropout(net, p_keep_conv)

    # Conv block 2: same shape of stage as block 1
    net = tf.nn.conv2d(net, w2, strides=stride1, padding='SAME')
    net = tf.nn.max_pool(tf.nn.relu(net), ksize=pool2, strides=pool2, padding='SAME')
    net = tf.nn.dropout(net, p_keep_conv)

    # Conv block 3: conv -> relu, then the pool that precedes flattening
    net = tf.nn.relu(tf.nn.conv2d(net, w3, strides=stride1, padding='SAME'))
    net = tf.nn.max_pool(net, ksize=pool2, strides=pool2, padding='SAME')

    # Flatten to match the first FC weight matrix, then dropout
    net = tf.reshape(net, [-1, w4.get_shape().as_list()[0]])
    net = tf.nn.dropout(net, p_keep_conv)

    # Hidden FC layer (relu + dropout) followed by the linear output layer
    net = tf.nn.dropout(tf.nn.relu(tf.matmul(net, w4)), p_keep_hidden)
    return tf.matmul(net, w_o)


#mnist = mnist_data.read_data_sets("ata/")
# Download (if needed) and load MNIST with one-hot encoded labels.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

trX, trY, teX, teY = mnist.train.images,\
                     mnist.train.labels, \
                     mnist.test.images, \
                     mnist.test.labels

trX = trX.reshape(-1, img_size, img_size, 1)  # 28x28x1 input img
teX = teX.reshape(-1, img_size, img_size, 1)  # 28x28x1 input img

# Placeholders for a batch of images and their one-hot labels.
X = tf.placeholder("float", [None, img_size, img_size, 1])
Y = tf.placeholder("float", [None, num_classes])

w = init_weights([3, 3, 1, 32])       # 3x3x1 conv, 32 outputs
w2 = init_weights([3, 3, 32, 64])     # 3x3x32 conv, 64 outputs
w3 = init_weights([3, 3, 64, 128])    # 3x3x64 conv, 128 outputs
w4 = init_weights([128 * 4 * 4, 625]) # FC 128 * 4 * 4 inputs, 625 outputs
w_o = init_weights([625, num_classes])         # FC 625 inputs, 10 outputs (labels)

# Dropout keep probabilities; fed as 1.0 when evaluating.
p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")
py_x = model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)

# Mean softmax cross-entropy over the batch, minimized with RMSProp.
Y_ = tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y)
cost = tf.reduce_mean(Y_)
optimizer  = tf.train.\
           RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)

with tf.Session() as sess:
    #tf.initialize_all_variables().run()
    tf.global_variables_initializer().run()
    for i in range(100):
        # Pair up (start, end) indices for consecutive full mini-batches.
        training_batch = \
                       zip(range(0, len(trX), \
                                 batch_size),
                             range(batch_size, \
                                   len(trX)+1, \
                                   batch_size))
        for start, end in training_batch:
            sess.run(optimizer , feed_dict={X: trX[start:end],\
                                          Y: trY[start:end],\
                                          p_keep_conv: 0.8,\
                                          p_keep_hidden: 0.5})

        # After each epoch, measure accuracy on a random test subset.
        test_indices = np.arange(len(teX)) # Get A Test Batch
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:test_size]

        print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==\
                         sess.run\
                         (predict_op,\
                          feed_dict={X: teX[test_indices],\
                                     Y: teY[test_indices], \
                                     p_keep_conv: 1.0,\
                                     p_keep_hidden: 1.0})))

"""
Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Successfully extracted to train-images-idx3-ubyte.mnist 9912422 bytes.
Loading ata/train-images-idx3-ubyte.mnist
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Successfully extracted to train-labels-idx1-ubyte.mnist 28881 bytes.
Loading ata/train-labels-idx1-ubyte.mnist
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Successfully extracted to t10k-images-idx3-ubyte.mnist 1648877 bytes.
Loading ata/t10k-images-idx3-ubyte.mnist
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Successfully extracted to t10k-labels-idx1-ubyte.mnist 4542 bytes.
Loading ata/t10k-labels-idx1-ubyte.mnist
(0, 0.95703125)
(1, 0.98046875)
(2, 0.9921875)
(3, 0.99609375)
(4, 0.99609375)
(5, 0.98828125)
(6, 0.99609375)
(7, 0.99609375)
(8, 0.98828125)
(9, 0.98046875)
(10, 0.99609375)
(11, 1.0)
(12, 0.9921875)
(13, 0.98046875)
(14, 0.98828125)
(15, 0.9921875)
(16, 0.9921875)
(17, 0.9921875)
(18, 0.9921875)
(19, 1.0)
(20, 0.98828125)
(21, 0.99609375)
(22, 0.98828125)
(23, 1.0)
(24, 0.9921875)
(25, 0.99609375)
(26, 0.99609375)
(27, 0.98828125)
(28, 0.98828125)
(29, 0.9921875)
(30, 0.99609375)
(31, 0.9921875)
(32, 0.99609375)
(33, 1.0)
(34, 0.99609375)
(35, 1.0)
(36, 0.9921875)
(37, 1.0)
(38, 0.99609375)
(39, 0.99609375)
(40, 0.99609375)
(41, 0.9921875)
(42, 0.98828125)
(43, 0.9921875)
(44, 0.9921875)
(45, 0.9921875)
(46, 0.9921875)
(47, 0.98828125)
(48, 0.99609375)
(49, 0.99609375)
(50, 1.0)
(51, 0.98046875)
(52, 0.99609375)
(53, 0.98828125)
(54, 0.99609375)
(55, 0.9921875)
(56, 0.99609375)
(57, 0.9921875)
(58, 0.98828125)
(59, 0.99609375)
(60, 0.99609375)
(61, 0.98828125)
(62, 1.0)
(63, 0.98828125)
(64, 0.98828125)
(65, 0.98828125)
(66, 1.0)
(67, 0.99609375)
(68, 1.0)
(69, 1.0)
(70, 0.9921875)
(71, 0.99609375)
(72, 0.984375)
(73, 0.9921875)
(74, 0.98828125)
(75, 0.99609375)
(76, 1.0)
(77, 0.9921875)
(78, 0.984375)
(79, 1.0)
(80, 0.9921875)
(81, 0.9921875)
(82, 0.99609375)
(83, 1.0)
(84, 0.98828125)
(85, 0.98828125)
(86, 0.99609375)
(87, 1.0)
(88, 0.99609375)
"""


================================================
FILE: Chapter04/MNIST_CNN/Python 3.5/mnist_cnn_1.py
================================================
import tensorflow as tf
import numpy as np
#import mnist_data 

batch_size = 128
test_size = 256
img_size = 28
num_classes = 10

def init_weights(shape):
    """Return a trainable weight tensor initialized from N(0, 0.01^2)."""
    return tf.Variable(tf.random_normal(shape, stddev=0.01))


def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
    """CNN forward pass: three conv blocks, a hidden FC layer, linear logits.

    p_keep_conv is the dropout keep probability after each conv stage (and
    after flattening); p_keep_hidden applies after the hidden FC layer.
    """
    unit_stride = [1, 1, 1, 1]
    halve = [1, 2, 2, 1]

    def conv_block(inp, kernel):
        # conv -> relu -> 2x2 max-pool -> dropout
        act = tf.nn.relu(tf.nn.conv2d(inp, kernel, strides=unit_stride, padding='SAME'))
        pooled = tf.nn.max_pool(act, ksize=halve, strides=halve, padding='SAME')
        return tf.nn.dropout(pooled, p_keep_conv)

    block1 = conv_block(X, w)
    block2 = conv_block(block1, w2)

    # Third stage: conv -> relu, pooled just before flattening (no dropout
    # until after the reshape, matching the original layer order).
    act3 = tf.nn.relu(tf.nn.conv2d(block2, w3, strides=unit_stride, padding='SAME'))
    pooled3 = tf.nn.max_pool(act3, ksize=halve, strides=halve, padding='SAME')

    flat = tf.reshape(pooled3, [-1, w4.get_shape().as_list()[0]])
    flat = tf.nn.dropout(flat, p_keep_conv)

    hidden = tf.nn.relu(tf.matmul(flat, w4))
    hidden = tf.nn.dropout(hidden, p_keep_hidden)

    return tf.matmul(hidden, w_o)


#mnist = mnist_data.read_data_sets("ata/")
# Download (if needed) and load MNIST with one-hot encoded labels.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

trX, trY, teX, teY = mnist.train.images,\
                     mnist.train.labels, \
                     mnist.test.images, \
                     mnist.test.labels

trX = trX.reshape(-1, img_size, img_size, 1)  # 28x28x1 input img
teX = teX.reshape(-1, img_size, img_size, 1)  # 28x28x1 input img

# Placeholders for a batch of images and their one-hot labels.
X = tf.placeholder("float", [None, img_size, img_size, 1])
Y = tf.placeholder("float", [None, num_classes])

w = init_weights([3, 3, 1, 32])       # 3x3x1 conv, 32 outputs
w2 = init_weights([3, 3, 32, 64])     # 3x3x32 conv, 64 outputs
w3 = init_weights([3, 3, 64, 128])    # 3x3x64 conv, 128 outputs
w4 = init_weights([128 * 4 * 4, 625]) # FC 128 * 4 * 4 inputs, 625 outputs
w_o = init_weights([625, num_classes])         # FC 625 inputs, 10 outputs (labels)

# Dropout keep probabilities; fed as 1.0 when evaluating.
p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")
py_x = model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)

# Mean softmax cross-entropy over the batch, minimized with RMSProp.
Y_ = tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y)
cost = tf.reduce_mean(Y_)
optimizer  = tf.train.\
           RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)

with tf.Session() as sess:
    #tf.initialize_all_variables().run()
    tf.global_variables_initializer().run()
    for i in range(100):
        # Pair up (start, end) indices for consecutive full mini-batches.
        training_batch = \
                       zip(range(0, len(trX), \
                                 batch_size),
                             range(batch_size, \
                                   len(trX)+1, \
                                   batch_size))
        for start, end in training_batch:
            sess.run(optimizer, feed_dict={X: trX[start:end],\
                                          Y: trY[start:end],\
                                          p_keep_conv: 0.8,\
                                          p_keep_hidden: 0.5})

        # After each epoch, measure accuracy on a random test subset.
        test_indices = np.arange(len(teX))# Get A Test Batch
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:test_size]

        print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==\
                         sess.run\
                         (predict_op,\
                          feed_dict={X: teX[test_indices],\
                                     Y: teY[test_indices], \
                                     p_keep_conv: 1.0,\
                                     p_keep_hidden: 1.0})))

"""
Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Successfully extracted to train-images-idx3-ubyte.mnist 9912422 bytes.
Loading ata/train-images-idx3-ubyte.mnist
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Successfully extracted to train-labels-idx1-ubyte.mnist 28881 bytes.
Loading ata/train-labels-idx1-ubyte.mnist
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Successfully extracted to t10k-images-idx3-ubyte.mnist 1648877 bytes.
Loading ata/t10k-images-idx3-ubyte.mnist
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Successfully extracted to t10k-labels-idx1-ubyte.mnist 4542 bytes.
Loading ata/t10k-labels-idx1-ubyte.mnist
(0, 0.95703125)
(1, 0.98046875)
(2, 0.9921875)
(3, 0.99609375)
(4, 0.99609375)
(5, 0.98828125)
(6, 0.99609375)
(7, 0.99609375)
(8, 0.98828125)
(9, 0.98046875)
(10, 0.99609375)
(11, 1.0)
(12, 0.9921875)
(13, 0.98046875)
(14, 0.98828125)
(15, 0.9921875)
(16, 0.9921875)
(17, 0.9921875)
(18, 0.9921875)
(19, 1.0)
(20, 0.98828125)
(21, 0.99609375)
(22, 0.98828125)
(23, 1.0)
(24, 0.9921875)
(25, 0.99609375)
(26, 0.99609375)
(27, 0.98828125)
(28, 0.98828125)
(29, 0.9921875)
(30, 0.99609375)
(31, 0.9921875)
(32, 0.99609375)
(33, 1.0)
(34, 0.99609375)
(35, 1.0)
(36, 0.9921875)
(37, 1.0)
(38, 0.99609375)
(39, 0.99609375)
(40, 0.99609375)
(41, 0.9921875)
(42, 0.98828125)
(43, 0.9921875)
(44, 0.9921875)
(45, 0.9921875)
(46, 0.9921875)
(47, 0.98828125)
(48, 0.99609375)
(49, 0.99609375)
(50, 1.0)
(51, 0.98046875)
(52, 0.99609375)
(53, 0.98828125)
(54, 0.99609375)
(55, 0.9921875)
(56, 0.99609375)
(57, 0.9921875)
(58, 0.98828125)
(59, 0.99609375)
(60, 0.99609375)
(61, 0.98828125)
(62, 1.0)
(63, 0.98828125)
(64, 0.98828125)
(65, 0.98828125)
(66, 1.0)
(67, 0.99609375)
(68, 1.0)
(69, 1.0)
(70, 0.9921875)
(71, 0.99609375)
(72, 0.984375)
(73, 0.9921875)
(74, 0.98828125)
(75, 0.99609375)
(76, 1.0)
(77, 0.9921875)
(78, 0.984375)
(79, 1.0)
(80, 0.9921875)
(81, 0.9921875)
(82, 0.99609375)
(83, 1.0)
(84, 0.98828125)
(85, 0.98828125)
(86, 0.99609375)
(87, 1.0)
(88, 0.99609375)
"""


================================================
FILE: Chapter05/Python 2.7/Convlutional_AutoEncoder.py
================================================
import matplotlib.pyplot as plt
import numpy as np
import math
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data

from tensorflow.python.framework import ops
import warnings
import random
import os

warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow info/warning logs
ops.reset_default_graph()

# LOAD PACKAGES
mnist = input_data.read_data_sets("data/", one_hot=True)
trainimgs = mnist.train.images
trainlabels = mnist.train.labels
testimgs = mnist.test.images
testlabels = mnist.test.labels
ntrain = trainimgs.shape[0]     # number of training examples
ntest = testimgs.shape[0]       # number of test examples
dim = trainimgs.shape[1]        # flattened image size (28*28 = 784)
nout = trainlabels.shape[1]     # one-hot label width

print("Packages loaded")
# WEIGHT AND BIASES
# Channel depths of the three encoder stages; the decoder mirrors them.
n1 = 16
n2 = 32
n3 = 64
ksize = 5  # square convolution kernel size

# Encoder filters 'ce*' and decoder filters 'cd*'. conv2d_transpose takes
# filters shaped [k, k, output_channels, input_channels], which is why the
# 'cd*' shapes mirror the corresponding 'ce*' shapes.
weights = {
    'ce1': tf.Variable(tf.random_normal([ksize, ksize, 1, n1], stddev=0.1)),
    'ce2': tf.Variable(tf.random_normal([ksize, ksize, n1, n2], stddev=0.1)),
    'ce3': tf.Variable(tf.random_normal([ksize, ksize, n2, n3], stddev=0.1)),
    'cd3': tf.Variable(tf.random_normal([ksize, ksize, n2, n3], stddev=0.1)),
    'cd2': tf.Variable(tf.random_normal([ksize, ksize, n1, n2], stddev=0.1)),
    'cd1': tf.Variable(tf.random_normal([ksize, ksize, 1, n1], stddev=0.1))
}
# One bias per stage, sized to the stage's output depth.
biases = {
    'be1': tf.Variable(tf.random_normal([n1], stddev=0.1)),
    'be2': tf.Variable(tf.random_normal([n2], stddev=0.1)),
    'be3': tf.Variable(tf.random_normal([n3], stddev=0.1)),
    'bd3': tf.Variable(tf.random_normal([n2], stddev=0.1)),
    'bd2': tf.Variable(tf.random_normal([n1], stddev=0.1)),
    'bd1': tf.Variable(tf.random_normal([1], stddev=0.1))
}


def cae(_X, _W, _b, _keepprob):
    """Convolutional autoencoder: 3 strided-conv encoder stages mirrored by
    3 conv-transpose decoder stages, each sigmoid-activated with dropout."""
    def _stage(pre_activation):
        # Shared tail of every stage: sigmoid nonlinearity then dropout.
        return tf.nn.dropout(tf.nn.sigmoid(pre_activation), _keepprob)

    img = tf.reshape(_X, shape=[-1, 28, 28, 1])
    batch = tf.shape(_X)[0]
    stride2 = [1, 2, 2, 1]

    # Encoder: each stride-2 conv halves the spatial resolution.
    enc1 = _stage(tf.add(tf.nn.conv2d(img, _W['ce1'], strides=stride2, padding='SAME'), _b['be1']))
    enc2 = _stage(tf.add(tf.nn.conv2d(enc1, _W['ce2'], strides=stride2, padding='SAME'), _b['be2']))
    enc3 = _stage(tf.add(tf.nn.conv2d(enc2, _W['ce3'], strides=stride2, padding='SAME'), _b['be3']))

    # Decoder: transpose convolutions upsample back to 28x28x1.
    dec3 = _stage(tf.add(tf.nn.conv2d_transpose(enc3, _W['cd3'], tf.stack([batch, 7, 7, n2]),
                                                strides=stride2, padding='SAME'), _b['bd3']))
    dec2 = _stage(tf.add(tf.nn.conv2d_transpose(dec3, _W['cd2'], tf.stack([batch, 14, 14, n1]),
                                                strides=stride2, padding='SAME'), _b['bd2']))
    dec1 = _stage(tf.add(tf.nn.conv2d_transpose(dec2, _W['cd1'], tf.stack([batch, 28, 28, 1]),
                                                strides=stride2, padding='SAME'), _b['bd1']))
    return dec1

print("Network ready")
x = tf.placeholder(tf.float32, [None, dim])   # noisy input batch
y = tf.placeholder(tf.float32, [None, dim])   # clean reconstruction target
keepprob = tf.placeholder(tf.float32)         # dropout keep probability

# Build the network ONCE and reuse it for both prediction and the loss.
# The old code called cae() a second time inside `cost`, creating a
# duplicate network with its own dropout masks — so the tensor being
# optimized was not the tensor being displayed.
pred = cae(x, weights, biases, keepprob)
cost = tf.reduce_sum(tf.square(pred - tf.reshape(y, shape=[-1, 28, 28, 1])))

learning_rate = 0.001
optm = tf.train.AdamOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()

print("Functions ready")
sess = tf.Session()
sess.run(init)

# mean_img = np.mean(mnist.train.images, axis=0)
mean_img = np.zeros((784))
# Fit all training data
batch_size = 128
n_epochs = 50
print("Start training..")  # (typo "Strart" fixed)

for epoch_i in range(n_epochs):
    for batch_i in range(mnist.train.num_examples // batch_size):
        batch_xs, _ = mnist.train.next_batch(batch_size)
        trainbatch = np.array([img - mean_img for img in batch_xs])
        # Corrupt the inputs with Gaussian noise; the clean batch stays the target.
        trainbatch_noisy = trainbatch + 0.3 * np.random.randn(trainbatch.shape[0], 784)
        sess.run(optm, feed_dict={x: trainbatch_noisy, y: trainbatch, keepprob: 0.7})
        print("[%02d/%02d] cost: %.4f" % (epoch_i, n_epochs,
              sess.run(cost, feed_dict={x: trainbatch_noisy, y: trainbatch, keepprob: 1.})))

    # Every 10 epochs, show a few noisy test digits and their reconstructions.
    if (epoch_i % 10) == 0:
        n_examples = 5
        test_xs, _ = mnist.test.next_batch(n_examples)
        test_xs_noisy = test_xs + 0.3 * np.random.randn(test_xs.shape[0], 784)
        recon = sess.run(pred, feed_dict={x: test_xs_noisy, keepprob: 1.})
        fig, axs = plt.subplots(2, n_examples, figsize=(15, 4))
        for example_i in range(n_examples):
            axs[0][example_i].matshow(np.reshape(test_xs_noisy[example_i, :], (28, 28)),
                                      cmap=plt.get_cmap('gray'))
            axs[1][example_i].matshow(np.reshape(np.reshape(recon[example_i, ...], (784,)) + mean_img, (28, 28)),
                                      cmap=plt.get_cmap('gray'))
        # Show the figure once, after all panels are filled (plt.show() used
        # to sit inside the per-example loop, blocking on a partial figure).
        plt.show()


================================================
FILE: Chapter05/Python 2.7/autoencoder_1.py
================================================
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


# Import MINST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

#mnist = mnist_data.read_data_sets("data/")

# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 256
display_step = 1        # print the loss every `display_step` epochs
examples_to_show = 10   # reconstructions to display after training

# Network Parameters
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 128 # 2nd layer num features (the code layer)
n_input = 784 # MNIST data input (img shape: 28*28)

# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])

# Encoder/decoder weight matrices; the decoder mirrors the encoder shapes.
weights = {
    'encoder_h1': tf.Variable\
    (tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable\
    (tf.random_normal([n_hidden_1, n_hidden_2])),
    'decoder_h1': tf.Variable\
    (tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h2': tf.Variable\
    (tf.random_normal([n_hidden_1, n_input])),
}
# One bias vector per layer, sized to the layer's output width.
biases = {
    'encoder_b1': tf.Variable\
    (tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable\
    (tf.random_normal([n_hidden_2])),
    'decoder_b1': tf.Variable\
    (tf.random_normal([n_hidden_1])),
    'decoder_b2': tf.Variable\
    (tf.random_normal([n_input])),
}



# Encoder layer 1 with sigmoid activation: 784 -> 256
encoder_in = tf.nn.sigmoid(tf.add\
                           (tf.matmul(X, \
                                      weights['encoder_h1']),\
                            biases['encoder_b1']))

# Encoder layer 2 (code layer) with sigmoid activation: 256 -> 128
encoder_out = tf.nn.sigmoid(tf.add\
                            (tf.matmul(encoder_in,\
                                       weights['encoder_h2']),\
                             biases['encoder_b2']))


# Decoder layer 1 with sigmoid activation: 128 -> 256
decoder_in = tf.nn.sigmoid(tf.add\
                           (tf.matmul(encoder_out,\
                                      weights['decoder_h1']),\
                            biases['decoder_b1']))

# Decoder layer 2 with sigmoid activation: 256 -> 784 reconstruction
decoder_out = tf.nn.sigmoid(tf.add\
                            (tf.matmul(decoder_in,\
                                       weights['decoder_h2']),\
                             biases['decoder_b2']))


# Prediction
y_pred = decoder_out
# Targets (Labels) are the input data.
y_true = X

# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    total_batch = int(mnist.train.num_examples/batch_size)
    # Training cycle
    for epoch in range(training_epochs):
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys =\
                      mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value);
            # the labels are ignored — the autoencoder reconstructs its input.
            _, c = sess.run([optimizer, cost],\
                            feed_dict={X: batch_xs})
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1),
                  "cost=", "{:.9f}".format(c))

    print("Optimization Finished!")

    # Applying encode and decode over test set
    encode_decode = sess.run(
        y_pred, feed_dict=\
        {X: mnist.test.images[:examples_to_show]})
    # Compare original images with their reconstructions.
    # BUG FIX: the grid needs one column per displayed example;
    # subplots(2, 4) raised IndexError once i reached 4 of the 10 examples.
    f, a = plt.subplots(2, examples_to_show, figsize=(10, 5))
    for i in range(examples_to_show):
        a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
        a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
    f.show()
    plt.draw()
    plt.show()


================================================
FILE: Chapter05/Python 2.7/deconvolutional_autoencoder_1.py
================================================
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

#Plot function
def plotresult(org_vec, noisy_vec, out_vec):
    """Display the original, noisy input, and reconstructed 28x28 images."""
    gray = plt.get_cmap('gray')
    panels = ((org_vec, "Original Image"),
              (noisy_vec, "Input Image"),
              (out_vec, "Reconstructed Image"))
    # One matshow figure per vector, each with a title and colorbar.
    for vec, title in panels:
        plt.matshow(np.reshape(vec, (28, 28)), cmap=gray)
        plt.title(title)
        plt.colorbar()
    plt.show()

# NETWORK PARAMETERS
n_input    = 784   # 28x28 MNIST pixels, flattened
n_hidden_1 = 256   # first hidden layer units
n_hidden_2 = 256   # second hidden layer units
n_output   = 784   # reconstructed image size

epochs     = 110
batch_size = 100
disp_step  = 10    # evaluate/plot every disp_step epochs

print ("PACKAGES LOADED")

# Load MNIST; labels are one-hot but only used to report the test digit.
mnist = input_data.read_data_sets('data/', one_hot=True)
trainimg   = mnist.train.images
trainlabel = mnist.train.labels
testimg    = mnist.test.images
testlabel  = mnist.test.labels
print ("MNIST LOADED")


# PLACEHOLDERS
x = tf.placeholder("float", [None, n_input])    # noisy input batch
y = tf.placeholder("float", [None, n_output])   # clean reconstruction target
dropout_keep_prob = tf.placeholder("float")     # 0.8 in training, 1.0 at test

# WEIGHTS
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_output]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_output]))
}

# Encoder: sigmoid hidden layer followed by dropout regularization.
encode_in = tf.nn.sigmoid(
    tf.add(tf.matmul(x, weights['h1']), biases['b1']))
encode_out = tf.nn.dropout(encode_in, dropout_keep_prob)

# Decoder: second sigmoid hidden layer, again with dropout.
decode_in = tf.nn.sigmoid(
    tf.add(tf.matmul(encode_out, weights['h2']), biases['b2']))
decode_out = tf.nn.dropout(decode_in, dropout_keep_prob)

# Output layer reconstructs the 784-pixel image into [0, 1].
y_pred = tf.nn.sigmoid(
    tf.add(tf.matmul(decode_out, weights['out']), biases['out']))

# COST: mean squared reconstruction error against the clean target.
cost = tf.reduce_mean(tf.pow(y_pred - y, 2))

# OPTIMIZER (variable name kept as in the original file; the session uses it)
optmizer = tf.train.RMSPropOptimizer(0.01).minimize(cost)

# INITIALIZER -- BUG FIX: the original created this op twice; once suffices.
init = tf.global_variables_initializer()

# Launch the graph: denoising training loop with periodic visual checks.
with tf.Session() as sess:
    sess.run(init)
    print ("Start Training")
    for epoch in range(epochs):
        num_batch  = int(mnist.train.num_examples/batch_size)
        total_cost = 0.
        for i in range(num_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Corrupt the inputs with Gaussian noise; the clean batch
            # remains the reconstruction target (denoising setup).
            batch_xs_noisy = batch_xs\
                             + 0.3*np.random.randn(batch_size, 784)
            feeds = {x: batch_xs_noisy,\
                     y: batch_xs, \
                     dropout_keep_prob: 0.8}
            sess.run(optmizer, feed_dict=feeds)
            # NOTE(review): this second run() just to read the loss doubles
            # the per-batch work; fetching cost with the train op would avoid it.
            total_cost += sess.run(cost, feed_dict=feeds)
        # DISPLAY
        if epoch % disp_step == 0:
            print ("Epoch %02d/%02d average cost: %.6f" 
                   % (epoch, epochs, total_cost/num_batch))

            # Test one: pick a random test image, denoise it, and plot it.
            print ("Start Test")
            randidx   = np.random.randint\
                        (testimg.shape[0], size=1)
            orgvec    = testimg[randidx, :]
            testvec   = testimg[randidx, :]
            label     = np.argmax(testlabel[randidx, :], 1)

            print ("Test label is %d" % (label)) 
            noisyvec = testvec + 0.3*np.random.randn(1, 784)
            # Dropout disabled (keep prob 1) for evaluation.
            outvec   = sess.run(y_pred,\
                                feed_dict={x: noisyvec,\
                                           dropout_keep_prob: 1})

            plotresult(orgvec,noisyvec,outvec)
            print ("restart Training")


================================================
FILE: Chapter05/Python 2.7/denoising_autoencoder_1.py
================================================
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

#Plot function
def plotresult(org_vec, noisy_vec, out_vec):
    """Display the original, noisy input, and reconstructed 28x28 images."""
    gray = plt.get_cmap('gray')
    # One matshow figure per vector, each with a title and colorbar.
    for vec, title in ((org_vec, "Original Image"),
                       (noisy_vec, "Input Image"),
                       (out_vec, "Reconstructed Image")):
        plt.matshow(np.reshape(vec, (28, 28)), cmap=gray)
        plt.title(title)
        plt.colorbar()
    plt.show()

# NETWORK PARAMETERS
n_input    = 784   # 28x28 MNIST pixels, flattened
n_hidden_1 = 256   # first hidden layer units
n_hidden_2 = 256   # second hidden layer units
n_output   = 784   # reconstructed image size

epochs     = 100
batch_size = 100
disp_step  = 10    # evaluate/plot every disp_step epochs

print ("PACKAGES LOADED")

# Load MNIST; labels are one-hot but only used to report the test digit.
mnist = input_data.read_data_sets('data/', one_hot=True)
trainimg   = mnist.train.images
trainlabel = mnist.train.labels
testimg    = mnist.test.images
testlabel  = mnist.test.labels
print ("MNIST LOADED")


# PLACEHOLDERS
x = tf.placeholder("float", [None, n_input])    # noisy input batch
y = tf.placeholder("float", [None, n_output])   # clean reconstruction target
dropout_keep_prob = tf.placeholder("float")     # 0.8 in training, 1.0 at test

# WEIGHTS
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_output]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_output]))
}

# Encoder: sigmoid hidden layer followed by dropout regularization.
encode_in = tf.nn.sigmoid(
    tf.add(tf.matmul(x, weights['h1']), biases['b1']))
encode_out = tf.nn.dropout(encode_in, dropout_keep_prob)

# Decoder: second sigmoid hidden layer, again with dropout.
decode_in = tf.nn.sigmoid(
    tf.add(tf.matmul(encode_out, weights['h2']), biases['b2']))
decode_out = tf.nn.dropout(decode_in, dropout_keep_prob)

# Output layer reconstructs the 784-pixel image into [0, 1].
y_pred = tf.nn.sigmoid(
    tf.add(tf.matmul(decode_out, weights['out']), biases['out']))

# COST: mean squared reconstruction error against the clean target.
cost = tf.reduce_mean(tf.pow(y_pred - y, 2))

# OPTIMIZER (variable name kept as in the original file; the session uses it)
optmizer = tf.train.RMSPropOptimizer(0.01).minimize(cost)

# INITIALIZER -- BUG FIX: the original created this op twice; once suffices.
init = tf.global_variables_initializer()

# Launch the graph: denoising training loop with periodic visual checks.
with tf.Session() as sess:
    sess.run(init)
    print ("Start Training")
    for epoch in range(epochs):
        num_batch  = int(mnist.train.num_examples/batch_size)
        total_cost = 0.
        for i in range(num_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Corrupt the inputs with Gaussian noise; the clean batch
            # remains the reconstruction target (denoising setup).
            batch_xs_noisy = batch_xs + 0.3*np.random.randn(batch_size, 784)
            feeds = {x: batch_xs_noisy, y: batch_xs, dropout_keep_prob: 0.8}
            sess.run(optmizer, feed_dict=feeds)
            # NOTE(review): this second run() just to read the loss doubles
            # the per-batch work; fetching cost with the train op would avoid it.
            total_cost += sess.run(cost, feed_dict=feeds)
        # DISPLAY
        if epoch % disp_step == 0:
            print ("Epoch %02d/%02d average cost: %.6f" 
                   % (epoch, epochs, total_cost/num_batch))

            # Test one: pick a random test image, denoise it, and plot it.
            print ("Start Test")
            randidx   = np.random.randint\
                        (testimg.shape[0], size=1)
            orgvec    = testimg[randidx, :]
            testvec   = testimg[randidx, :]
            label     = np.argmax(testlabel[randidx, :], 1)

            print ("Test label is %d" % (label)) 
            noisyvec = testvec + 0.3*np.random.randn(1, 784)
            # Dropout disabled (keep prob 1) for evaluation.
            outvec   = sess.run(y_pred,\
                                feed_dict={x: noisyvec,\
                                           dropout_keep_prob: 1})

            plotresult(orgvec,noisyvec,outvec)
            print ("restart Training")


    
""""
PACKAGES LOADED
Extracting data/train-images-idx3-ubyte.gz
Extracting data/train-labels-idx1-ubyte.gz
Extracting data/t10k-images-idx3-ubyte.gz
Extracting data/t10k-labels-idx1-ubyte.gz
MNIST LOADED
Start Training
Epoch 00/100 average cost: 0.212313
Start Test
Test label is 6
restart Training
Epoch 10/100 average cost: 0.033660
Start Test
Test label is 2
restart Training
Epoch 20/100 average cost: 0.026888
Start Test
Test label is 6
restart Training
Epoch 30/100 average cost: 0.023660
Start Test
Test label is 1
restart Training
Epoch 40/100 average cost: 0.021740
Start Test
Test label is 9
restart Training
Epoch 50/100 average cost: 0.020399
Start Test
Test label is 0
restart Training
Epoch 60/100 average cost: 0.019593
Start Test
Test label is 9
restart Training
Epoch 70/100 average cost: 0.019026
Start Test
Test label is 1
restart Training
Epoch 80/100 average cost: 0.018537
Start Test
Test label is 4
restart Training
Epoch 90/100 average cost: 0.018224
Start Test
Test label is 9
restart Training
"""


================================================
FILE: Chapter05/Python 3.5/Convlutional_AutoEncoder.py
================================================
import matplotlib.pyplot as plt
import numpy as np
import math
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data

from tensorflow.python.framework import ops
import warnings
import random
import os

warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
ops.reset_default_graph()

# LOAD PACKAGES
mnist = input_data.read_data_sets("data/", one_hot=True)
trainimgs = mnist.train.images
trainlabels = mnist.train.labels
testimgs = mnist.test.images
testlabels = mnist.test.labels
ntrain = trainimgs.shape[0]
ntest = testimgs.shape[0]
dim = trainimgs.shape[1]
nout = trainlabels.shape[1]

print("Packages loaded")
# WEIGHT AND BIASES
n1 = 16
n2 = 32
n3 = 64
ksize = 5

weights = {
    'ce1': tf.Variable(tf.random_normal([ksize, ksize, 1, n1], stddev=0.1)),
    'ce2': tf.Variable(tf.random_normal([ksize, ksize, n1, n2], stddev=0.1)),
    'ce3': tf.Variable(tf.random_normal([ksize, ksize, n2, n3], stddev=0.1)),
    'cd3': tf.Variable(tf.random_normal([ksize, ksize, n2, n3], stddev=0.1)),
    'cd2': tf.Variable(tf.random_normal([ksize, ksize, n1, n2], stddev=0.1)),
    'cd1': tf.Variable(tf.random_normal([ksize, ksize, 1, n1], stddev=0.1))
}
biases = {
    'be1': tf.Variable(tf.random_normal([n1], stddev=0.1)),
    'be2': tf.Variable(tf.random_normal([n2], stddev=0.1)),
    'be3': tf.Variable(tf.random_normal([n3], stddev=0.1)),
    'bd3': tf.Variable(tf.random_normal([n2], stddev=0.1)),
    'bd2': tf.Variable(tf.random_normal([n1], stddev=0.1)),
    'bd1': tf.Variable(tf.random_normal([1], stddev=0.1))
}


def cae(_X, _W, _b, _keepprob):
    """Convolutional autoencoder.

    Three stride-2 SAME-padded conv layers encode 28x28 -> 14 -> 7 -> 4,
    then three transposed convolutions decode back 7 -> 14 -> 28.
    Every layer is sigmoid-activated and followed by dropout with keep
    probability `_keepprob`. Returns the (batch, 28, 28, 1) reconstruction.
    """
    imgs = tf.reshape(_X, shape=[-1, 28, 28, 1])
    batch = tf.shape(_X)[0]

    def _enc(inp, w, b):
        # One encoder stage: stride-2 conv + sigmoid + dropout.
        conv = tf.nn.conv2d(inp, w, strides=[1, 2, 2, 1], padding='SAME')
        return tf.nn.dropout(tf.nn.sigmoid(tf.add(conv, b)), _keepprob)

    def _dec(inp, w, b, hwc):
        # One decoder stage: stride-2 transposed conv + sigmoid + dropout.
        deconv = tf.nn.conv2d_transpose(inp, w, tf.stack([batch] + hwc),
                                        strides=[1, 2, 2, 1], padding='SAME')
        return tf.nn.dropout(tf.nn.sigmoid(tf.add(deconv, b)), _keepprob)

    h = _enc(imgs, _W['ce1'], _b['be1'])
    h = _enc(h, _W['ce2'], _b['be2'])
    h = _enc(h, _W['ce3'], _b['be3'])
    h = _dec(h, _W['cd3'], _b['bd3'], [7, 7, n2])
    h = _dec(h, _W['cd2'], _b['bd2'], [14, 14, n1])
    h = _dec(h, _W['cd1'], _b['bd1'], [28, 28, 1])
    return h

print("Network ready")
x = tf.placeholder(tf.float32, [None, dim])
y = tf.placeholder(tf.float32, [None, dim])
keepprob = tf.placeholder(tf.float32)
pred = cae(x, weights, biases, keepprob)  # ['out']
cost = tf.reduce_sum(tf.square(cae(x, weights, biases, keepprob)- tf.reshape(y, shape=[-1, 28, 28, 1])))

learning_rate = 0.001
optm = tf.train.AdamOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()

print("Functions ready")
sess = tf.Session()
sess.run(init)

# mean_img = np.mean(mnist.train.images, axis=0)
mean_img = np.zeros((784))
# Fit all training data
batch_size = 128
n_epochs = 50
print("Strart training..")

for epoch_i in range(n_epochs):
    for batch_i in range(mnist.train.num_examples // batch_size):
        batch_xs, _ = mnist.train.next_batch(batch_size)
        trainbatch = np.array([img - mean_img for img in batch_xs])
        trainbatch_noisy = trainbatch + 0.3 * np.random.randn(
        trainbatch.shape[0], 784)
        sess.run(optm, feed_dict={x: trainbatch_noisy, y: trainbatch, keepprob: 0.7})
        print("[%02d/%02d] cost: %.4f" % (epoch_i, n_epochs, sess.run(cost, feed_dict={x: trainbatch_noisy, y: trainbatch, keepprob: 1.})))

    if (epoch_i % 10) == 0:
        n_examples = 5
        test_xs, _ = mnist.test.next_batch(n_examples)
        test_xs_noisy = test_xs + 0.3 * np.random.randn(
        test_xs.shape[0], 784)
        recon = sess.run(pred, feed_dict={x: test_xs_noisy,keepprob: 1.})
        fig, axs = plt.subplots(2, n_examples, figsize=(15, 4))

        for example_i in range(n_examples):
             axs[0][example_i].matshow(np.reshape(test_xs_noisy[example_i, :], (28, 28)), cmap=plt.get_cmap('gray'))
             axs[1][example_i].matshow(np.reshape(np.reshape(recon[example_i, ...], (784,))+ mean_img, (28, 28)), cmap=plt.get_cmap('gray'))
             plt.show()


================================================
FILE: Chapter05/Python 3.5/__init__.py
================================================


================================================
FILE: Chapter05/Python 3.5/autoencoder_1.py
================================================
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

#mnist = mnist_data.read_data_sets("data/")

# Parameters
learning_rate = 0.01
training_epochs = 20
batch_size = 256
display_step = 1        # log the loss every epoch
examples_to_show = 10   # test images to reconstruct and display at the end

# Network Parameters
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 128 # 2nd layer num features (bottleneck)
n_input = 784 # MNIST data input (img shape: 28*28)

# tf Graph input (only pictures; labels are unused by the autoencoder)
X = tf.placeholder("float", [None, n_input])

# Encoder squeezes 784 -> 256 -> 128; decoder mirrors it back to 784.
weights = {
    'encoder_h1': tf.Variable\
    (tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable\
    (tf.random_normal([n_hidden_1, n_hidden_2])),
    'decoder_h1': tf.Variable\
    (tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h2': tf.Variable\
    (tf.random_normal([n_hidden_1, n_input])),
}
biases = {
    'encoder_b1': tf.Variable\
    (tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable\
    (tf.random_normal([n_hidden_2])),
    'decoder_b1': tf.Variable\
    (tf.random_normal([n_hidden_1])),
    'decoder_b2': tf.Variable\
    (tf.random_normal([n_input])),
}



# Encoder: two sigmoid layers squeeze 784 -> 256 -> 128 features.
encoder_in = tf.nn.sigmoid(
    tf.matmul(X, weights['encoder_h1']) + biases['encoder_b1'])
encoder_out = tf.nn.sigmoid(
    tf.matmul(encoder_in, weights['encoder_h2']) + biases['encoder_b2'])

# Decoder: mirror-image sigmoid layers expand 128 -> 256 -> 784.
decoder_in = tf.nn.sigmoid(
    tf.matmul(encoder_out, weights['decoder_h1']) + biases['decoder_b1'])
decoder_out = tf.nn.sigmoid(
    tf.matmul(decoder_in, weights['decoder_h2']) + biases['decoder_b2'])

# The reconstruction is compared against the input itself.
y_pred = decoder_out
y_true = X

# Mean squared reconstruction error, minimized with RMSProp.
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)

# Op that initializes all graph variables.
init = tf.global_variables_initializer()

# Launch the graph: train, then display originals vs reconstructions.
with tf.Session() as sess:
    sess.run(init)
    total_batch = int(mnist.train.num_examples/batch_size)
    # Training cycle
    for epoch in range(training_epochs):
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys =\
                      mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value).
            # Labels batch_ys are unused: the target is the input itself.
            _, c = sess.run([optimizer, cost],\
                            feed_dict={X: batch_xs})
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1),
                  "cost=", "{:.9f}".format(c))

    print("Optimization Finished!")

    # Applying encode and decode over test set
    encode_decode = sess.run(
        y_pred, feed_dict=\
        {X: mnist.test.images[:examples_to_show]})
    # Compare originals (top row) with their reconstructions (bottom row)
    f, a = plt.subplots(2, 10, figsize=(10, 2))
    for i in range(examples_to_show):
        a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
        a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
    f.show()
    plt.draw()
    plt.show()


================================================
FILE: Chapter05/Python 3.5/deconvolutional_autoencoder_1.py
================================================
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

#Plot function
def plotresult(org_vec, noisy_vec, out_vec):
    """Display the original, noisy input, and reconstructed 28x28 images."""
    gray = plt.get_cmap('gray')
    # One matshow figure per vector, each with a title and colorbar.
    for vec, title in ((org_vec, "Original Image"),
                       (noisy_vec, "Input Image"),
                       (out_vec, "Reconstructed Image")):
        plt.matshow(np.reshape(vec, (28, 28)), cmap=gray)
        plt.title(title)
        plt.colorbar()
    plt.show()

# NETWORK PARAMETERS
n_input = 784      # 28x28 MNIST pixels, flattened
n_hidden_1 = 256   # first hidden layer units
n_hidden_2 = 256   # second hidden layer units
n_output = 784     # reconstructed image size

epochs = 110
batch_size = 100
disp_step = 10     # evaluate/plot every disp_step epochs

print("PACKAGES LOADED")

# Load MNIST; labels are one-hot but only used to report the test digit.
mnist = input_data.read_data_sets('data/', one_hot=True)
trainimg   = mnist.train.images
trainlabel = mnist.train.labels
testimg    = mnist.test.images
testlabel  = mnist.test.labels
print("MNIST LOADED")


# PLACEHOLDERS
x = tf.placeholder("float", [None, n_input])    # noisy input batch
y = tf.placeholder("float", [None, n_output])   # clean reconstruction target
dropout_keep_prob = tf.placeholder("float")     # 0.8 in training, 1.0 at test

# WEIGHTS
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_output]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_output]))
}

# Encoder and decoder: sigmoid layers with dropout after each.
encode_in = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
encode_out = tf.nn.dropout(encode_in, dropout_keep_prob)
decode_in = tf.nn.sigmoid(tf.add(tf.matmul(encode_out, weights['h2']), biases['b2']))
decode_out = tf.nn.dropout(decode_in, dropout_keep_prob)

# Output layer reconstructs the 784-pixel image into [0, 1].
y_pred = tf.nn.sigmoid(tf.matmul(decode_out, weights['out']) + biases['out'])

# COST: mean squared reconstruction error against the clean target.
cost = tf.reduce_mean(tf.pow(y_pred - y, 2))

# OPTIMIZER (variable name kept as in the original file; the session uses it)
optmizer = tf.train.RMSPropOptimizer(0.01).minimize(cost)

# INITIALIZER -- BUG FIX: the original created this op twice; once suffices.
init = tf.global_variables_initializer()

# Launch the graph: denoising training loop with periodic visual checks.
with tf.Session() as sess:
    sess.run(init)
    print("Start Training")
    for epoch in range(epochs):
        num_batch  = int(mnist.train.num_examples/batch_size)
        total_cost = 0.
        for i in range(num_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Corrupt the inputs with Gaussian noise; the clean batch
            # remains the reconstruction target (denoising setup).
            batch_xs_noisy = batch_xs + 0.3*np.random.randn(batch_size, 784)
            feeds = {x: batch_xs_noisy, y: batch_xs, dropout_keep_prob: 0.8}
            sess.run(optmizer, feed_dict=feeds)
            # NOTE(review): this second run() just to read the loss doubles
            # the per-batch work; fetching cost with the train op would avoid it.
            total_cost += sess.run(cost, feed_dict=feeds)
        # DISPLAY
        if epoch % disp_step == 0:
            print("Epoch %02d/%02d average cost: %.6f" % (epoch, epochs, total_cost/num_batch))

            # Test one: pick a random test image, denoise it, and plot it.
            print ("Start Test")
            randidx = np.random.randint(testimg.shape[0], size=1)
            orgvec = testimg[randidx, :]
            testvec = testimg[randidx, :]
            label = np.argmax(testlabel[randidx, :], 1)

            print ("Test label is %d" % label)
            noisyvec = testvec + 0.3*np.random.randn(1, 784)
            # Dropout disabled (keep prob 1) for evaluation.
            outvec = sess.run(y_pred, feed_dict={x: noisyvec, dropout_keep_prob: 1})

            plotresult(orgvec,noisyvec,outvec)
            print("restart Training")

================================================
FILE: Chapter05/Python 3.5/denoising_autoencoder_1.py
================================================
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

#Plot function
def plotresult(org_vec, noisy_vec, out_vec):
    """Display the original, noisy input, and reconstructed 28x28 images."""
    gray = plt.get_cmap('gray')
    panels = ((org_vec, "Original Image"),
              (noisy_vec, "Input Image"),
              (out_vec, "Reconstructed Image"))
    # One matshow figure per vector, each with a title and colorbar.
    for vec, title in panels:
        plt.matshow(np.reshape(vec, (28, 28)), cmap=gray)
        plt.title(title)
        plt.colorbar()
    plt.show()

# NETWORK PARAMETERS
n_input = 784      # 28x28 MNIST pixels, flattened
n_hidden_1 = 256   # first hidden layer units
n_hidden_2 = 256   # second hidden layer units
n_output = 784     # reconstructed image size

epochs = 100
batch_size = 100
disp_step = 10     # evaluate/plot every disp_step epochs

print("PACKAGES LOADED")

# Load MNIST; labels are one-hot but only used to report the test digit.
mnist = input_data.read_data_sets('data/', one_hot=True)
trainimg = mnist.train.images
trainlabel = mnist.train.labels
testimg = mnist.test.images
testlabel = mnist.test.labels
print("MNIST LOADED")


# PLACEHOLDERS
x = tf.placeholder("float", [None, n_input])    # noisy input batch
y = tf.placeholder("float", [None, n_output])   # clean reconstruction target
dropout_keep_prob = tf.placeholder("float")     # 0.8 in training, 1.0 at test

# WEIGHTS
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_output]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_output]))
}

# Encoder: sigmoid hidden layer followed by dropout regularization.
encode_in = tf.nn.sigmoid(
    tf.add(tf.matmul(x, weights['h1']), biases['b1']))
encode_out = tf.nn.dropout(encode_in, dropout_keep_prob)

# Decoder: second sigmoid hidden layer, again with dropout.
decode_in = tf.nn.sigmoid(
    tf.add(tf.matmul(encode_out, weights['h2']), biases['b2']))
decode_out = tf.nn.dropout(decode_in, dropout_keep_prob)

# Output layer reconstructs the 784-pixel image into [0, 1].
y_pred = tf.nn.sigmoid(
    tf.add(tf.matmul(decode_out, weights['out']), biases['out']))

# COST: mean squared reconstruction error against the clean target.
cost = tf.reduce_mean(tf.pow(y_pred - y, 2))

# OPTIMIZER (variable name kept as in the original file; the session uses it)
optmizer = tf.train.RMSPropOptimizer(0.01).minimize(cost)

# INITIALIZER -- BUG FIX: the original created this op twice; once suffices.
init = tf.global_variables_initializer()

# Launch the graph: denoising training loop with periodic visual checks.
with tf.Session() as sess:
    sess.run(init)
    print("Start Training")
    for epoch in range(epochs):
        num_batch  = int(mnist.train.num_examples/batch_size)
        total_cost = 0.
        for i in range(num_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Corrupt the inputs with Gaussian noise; the clean batch
            # remains the reconstruction target (denoising setup).
            batch_xs_noisy = batch_xs + 0.3*np.random.randn(batch_size, 784)
            feeds = {x: batch_xs_noisy, y: batch_xs, dropout_keep_prob: 0.8}
            sess.run(optmizer, feed_dict=feeds)
            # NOTE(review): this second run() just to read the loss doubles
            # the per-batch work; fetching cost with the train op would avoid it.
            total_cost += sess.run(cost, feed_dict=feeds)
        # DISPLAY
        if epoch % disp_step == 0:
            print("Epoch %02d/%02d average cost: %.6f"
                   % (epoch, epochs, total_cost/num_batch))

            # Test one: pick a random test image, denoise it, and plot it.
            print("Start Test")
            randidx   = np.random.randint\
                        (testimg.shape[0], size=1)
            orgvec    = testimg[randidx, :]
            testvec   = testimg[randidx, :]
            label     = np.argmax(testlabel[randidx, :], 1)

            print("Test label is %d" % (label))
            noisyvec = testvec + 0.3*np.random.randn(1, 784)
            # Dropout disabled (keep prob 1) for evaluation.
            outvec   = sess.run(y_pred,\
                                feed_dict={x: noisyvec,\
                                           dropout_keep_prob: 1})

            plotresult(orgvec,noisyvec,outvec)
            print("restart Training")


    
""""
PACKAGES LOADED
Extracting data/train-images-idx3-ubyte.gz
Extracting data/train-labels-idx1-ubyte.gz
Extracting data/t10k-images-idx3-ubyte.gz
Extracting data/t10k-labels-idx1-ubyte.gz
MNIST LOADED
Start Training
Epoch 00/100 average cost: 0.212313
Start Test
Test label is 6
restart Training
Epoch 10/100 average cost: 0.033660
Start Test
Test label is 2
restart Training
Epoch 20/100 average cost: 0.026888
Start Test
Test label is 6
restart Training
Epoch 30/100 average cost: 0.023660
Start Test
Test label is 1
restart Training
Epoch 40/100 average cost: 0.021740
Start Test
Test label is 9
restart Training
Epoch 50/100 average cost: 0.020399
Start Test
Test label is 0
restart Training
Epoch 60/100 average cost: 0.019593
Start Test
Test label is 9
restart Training
Epoch 70/100 average cost: 0.019026
Start Test
Test label is 1
restart Training
Epoch 80/100 average cost: 0.018537
Start Test
Test label is 4
restart Training
Epoch 90/100 average cost: 0.018224
Start Test
Test label is 9
restart Training
"""


================================================
FILE: Chapter06/Python 2.7/LSTM_model_1.py
================================================
import tensorflow as tf
from tensorflow.contrib import rnn

from tensorflow.examples.tutorials.mnist import input_data
# MNIST digits with one-hot labels; each 28x28 image is treated as a
# length-28 sequence of 28-pixel rows for the recurrent model.
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Training hyperparameters: stop after ~training_iters examples,
# logging loss/accuracy every display_step batches.
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10

# n_input: pixels per row (features per step); n_steps: rows per image
# (sequence length); n_hidden: LSTM units; n_classes: digits 0-9.
n_input = 28 
n_steps = 28 
n_hidden = 128 
n_classes = 10 

x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

# Readout layer mapping the final LSTM output to class logits.
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}

def RNN(x, weights, biases):
    """Unrolled LSTM over the 28 pixel-rows of an MNIST image.

    `x` arrives as (batch, n_steps, n_input); rnn.static_rnn needs a
    list of n_steps tensors of shape (batch, n_input), so the input is
    transposed, flattened, and split before being fed to the cell.
    """
    time_major = tf.transpose(x, [1, 0, 2])
    flattened = tf.reshape(time_major, [-1, n_input])
    step_inputs = tf.split(axis=0, num_or_size_splits=n_steps, value=flattened)

    cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, _ = rnn.static_rnn(cell, step_inputs, dtype=tf.float32)

    # Classify from the final time step only.
    return tf.matmul(outputs[-1], weights['out']) + biases['out']

pred = RNN(x, weights, biases)
# Softmax cross-entropy on the logits vs one-hot labels, averaged over batch.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Batch accuracy: fraction of argmax predictions matching the labels.
correct_pred = tf. equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Train until roughly training_iters examples have been consumed.
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape the flat 784-vector into (batch, 28 steps, 28 inputs).
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")

    # Report accuracy on the first 128 test images.
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: test_data, y: test_label}))


================================================
FILE: Chapter06/Python 2.7/__init__.py
================================================


================================================
FILE: Chapter06/Python 2.7/bidirectional_RNN_1.py
================================================
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn

from tensorflow.examples.tutorials.mnist import input_data
# MNIST digits with one-hot labels; each 28x28 image is treated as a
# length-28 sequence of 28-pixel rows for the recurrent model.
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Training hyperparameters: stop after ~training_iters examples,
# logging loss/accuracy every display_step batches.
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10

# n_input: pixels per row; n_steps: sequence length; n_hidden: LSTM units.
n_input = 28 
n_steps = 28 
n_hidden = 128 
n_classes = 10 

x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

# Readout layer: forward and backward LSTM outputs are concatenated,
# hence the 2*n_hidden input width.
weights = {
    'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}

def BiRNN(x, weights, biases):
    """Bidirectional LSTM over the 28 pixel-rows of an MNIST image.

    Reshapes (batch, n_steps, n_input) into the length-n_steps list of
    (batch, n_input) tensors that static_bidirectional_rnn expects, and
    classifies from the final step's concatenated fw/bw output.
    """
    seq = tf.split(axis=0, num_or_size_splits=n_steps,
                   value=tf.reshape(tf.transpose(x, [1, 0, 2]), [-1, n_input]))
    fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(fw_cell, bw_cell, seq,
                                                     dtype=tf.float32)
    except Exception:  # Old TensorFlow versions return only the outputs.
        outputs = rnn.static_bidirectional_rnn(fw_cell, bw_cell, seq,
                                               dtype=tf.float32)
    return tf.matmul(outputs[-1], weights['out']) + biases['out']

pred = BiRNN(x, weights, biases)
# Softmax cross-entropy on the logits vs one-hot labels, averaged over batch.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Batch accuracy: fraction of argmax predictions matching the labels.
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Train until roughly training_iters examples have been consumed.
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape the flat 784-vector into (batch, 28 steps, 28 inputs).
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")

    # Report accuracy on the first 128 test images.
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: test_data, y: test_label}))


================================================
FILE: Chapter06/Python 3.5/LSTM_model_1.py
================================================
import tensorflow as tf
from tensorflow.contrib import rnn

from tensorflow.examples.tutorials.mnist import input_data
# MNIST digits with one-hot labels; each 28x28 image is treated as a
# length-28 sequence of 28-pixel rows for the recurrent model.
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Training hyperparameters: stop after ~training_iters examples,
# logging loss/accuracy every display_step batches.
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10

# n_input: pixels per row (features per step); n_steps: rows per image
# (sequence length); n_hidden: LSTM units; n_classes: digits 0-9.
n_input = 28 
n_steps = 28 
n_hidden = 128 
n_classes = 10 

x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

# Readout layer mapping the final LSTM output to class logits.
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}

def RNN(x, weights, biases):
    """Unroll a single-layer LSTM over the input sequence and project the
    final step's output to class logits.

    x: (batch, n_steps, n_input) tensor; returns (batch, n_classes) logits.
    """
    # Reorder to time-major (n_steps, batch, n_input), then split into a
    # Python list of n_steps tensors as static_rnn expects.
    time_major = tf.transpose(x, [1, 0, 2])
    flat = tf.reshape(time_major, [-1, n_input])
    step_inputs = tf.split(axis=0, num_or_size_splits=n_steps, value=flat)

    cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, _ = rnn.static_rnn(cell, step_inputs, dtype=tf.float32)

    # Classify from the last time step's output.
    logits = tf.matmul(outputs[-1], weights['out']) + biases['out']
    return logits

# Build the graph: LSTM logits, softmax cross-entropy loss, Adam step.
pred = RNN(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Accuracy = fraction of rows whose arg-max prediction matches the label.
# (Fixed the stray space in the original `tf. equal` call.)
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Train until roughly `training_iters` examples have been consumed.
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Present each 28-pixel image row as one LSTM time step.
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Loss/accuracy on the current minibatch only.
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")

    # Final accuracy measured on the first 128 test images only.
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: test_data, y: test_label}))


================================================
FILE: Chapter06/Python 3.5/__init__.py
================================================


================================================
FILE: Chapter06/Python 3.5/bidirectional_RNN_1.py
================================================
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn

from tensorflow.examples.tutorials.mnist import input_data
# Download/load MNIST with one-hot labels.
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Training hyper-parameters.
learning_rate = 0.001
training_iters = 100000  # stop once ~this many examples have been seen
batch_size = 128
display_step = 10        # log every N training steps

# Each 28x28 image is fed as a 28-step sequence of 28-pixel rows.
n_input = 28 
n_steps = 28 
n_hidden = 128  # hidden size of each direction's LSTM
n_classes = 10  # digits 0-9

x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

# 2*n_hidden: the bidirectional RNN concatenates forward and backward outputs.
weights = {
    'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}

def BiRNN(x, weights, biases):
    """Run a forward and a backward LSTM over the sequence and classify
    from the concatenated outputs of the last time step.

    x: (batch, n_steps, n_input) tensor; returns (batch, n_classes) logits.
    """
    # Time-major reordering, then split into a list of n_steps tensors.
    seq = tf.transpose(x, [1, 0, 2])
    seq = tf.reshape(seq, [-1, n_input])
    seq = tf.split(axis=0, num_or_size_splits=n_steps, value=seq)

    fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(fw_cell, bw_cell, seq,
                                                     dtype=tf.float32)
    except Exception:
        # Old TensorFlow versions return only the outputs, not the states.
        outputs = rnn.static_bidirectional_rnn(fw_cell, bw_cell, seq,
                                               dtype=tf.float32)
    # outputs[-1] is (batch, 2*n_hidden): forward+backward concatenated.
    return tf.matmul(outputs[-1], weights['out']) + biases['out']

# Build the graph: BiRNN logits, cross-entropy loss, Adam step, accuracy.
pred = BiRNN(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Train until roughly `training_iters` examples have been consumed.
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape flat MNIST vectors into (batch, n_steps, n_input) sequences.
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Loss/accuracy on the current minibatch only.
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")

    # Final accuracy measured on the first 128 test images only.
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: test_data, y: test_label}))


================================================
FILE: Chapter07/Python 2.7/gpu_computing_with_multiple_GPU.py
================================================
import numpy as np
import tensorflow as tf
import datetime

log_device_placement = True  # log each op's device when the session starts
n = 10                       # matrix power exponent

# Two random 10000x10000 float32 operand matrices.
A = np.random.rand(10000, 10000).astype('float32')
B = np.random.rand(10000, 10000).astype('float32')

# Collects the per-device partial results (one matrix power per GPU).
c1 = []

def matpow(M, n):
    """Return the matrix power M**n built as a chain of tf.matmul ops.

    Any n <= 1 returns M unchanged (no identity matrix is constructed),
    so matpow(M, 1) == M and degenerate n < 1 also yields M.
    """
    # Base case at n <= 1: the original `n < 1` test off-by-one'd the
    # recursion and computed M**(n+1) instead of the documented M**n.
    if n <= 1:
        return M
    return tf.matmul(M, matpow(M, n - 1))

#FIRST GPU: computes A^n
with tf.device('/gpu:0'):
    a = tf.placeholder(tf.float32, [10000, 10000])
    c1.append(matpow(a, n))

#SECOND GPU: computes B^n
with tf.device('/gpu:1'):
    b = tf.placeholder(tf.float32, [10000, 10000])
    c1.append(matpow(b, n))


# Sum the two partial results on the CPU.
# NOTE(review): `sum` shadows the builtin; harmless here but worth renaming.
with tf.device('/cpu:0'):
    sum = tf.add_n(c1) 
    print(sum)

# Time the full run; soft placement lets TF fall back when a device is missing.
t1_1 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto\
                 (allow_soft_placement=True,\
                log_device_placement=log_device_placement))\
                  as sess:
     sess.run(sum, {a:A, b:B})
t2_1 = datetime.datetime.now()


================================================
FILE: Chapter07/Python 2.7/gpu_example.py
================================================
import numpy as np
import tensorflow as tf
import datetime

log_device_placement = True  # log each op's device when the session starts

n = 10  # matrix power exponent

# Two random 10000x10000 float32 operand matrices.
A = np.random.rand(10000, 10000).astype('float32')
B = np.random.rand(10000, 10000).astype('float32')


c1 = []  # partial results placed on the GPU
c2 = []  # NOTE(review): unused

def matpow(M, n):
    """Return the matrix power M**n built as a chain of tf.matmul ops.

    Any n <= 1 returns M unchanged (no identity matrix is constructed),
    so matpow(M, 1) == M and degenerate n < 1 also yields M.
    """
    # Base case at n <= 1: the original `n < 1` test off-by-one'd the
    # recursion and computed M**(n+1) instead of the documented M**n.
    if n <= 1:
        return M
    return tf.matmul(M, matpow(M, n - 1))

# Both matrix powers are placed on the single GPU.
with tf.device('/gpu:0'):
    a = tf.placeholder(tf.float32, [10000, 10000])
    b = tf.placeholder(tf.float32, [10000, 10000])
    c1.append(matpow(a, n))
    c1.append(matpow(b, n))
# If the below code does not work use '/job:localhost/replica:0/task:0/cpu:0' as the GPU device
# NOTE(review): `sum` shadows the builtin; harmless here but worth renaming.
with tf.device('/cpu:0'):
  sum = tf.add_n(c1) #Addition of all elements in c1, i.e. A^n + B^n

# Time the full run (graph execution happens inside sess.run).
t1_1 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto\
              (log_device_placement=log_device_placement)) as sess:
     sess.run(sum, {a:A, b:B})
t2_1 = datetime.datetime.now()


================================================
FILE: Chapter07/Python 2.7/gpu_soft_placemnet_1.py
================================================
import numpy as np
import tensorflow as tf
import datetime

log_device_placement = True  # log each op's device when the session starts
n = 10                       # matrix power exponent

# Two random 10000x10000 float32 operand matrices.
A = np.random.rand(10000, 10000).astype('float32')
B = np.random.rand(10000, 10000).astype('float32')

c1 = []  # collects the two matrix-power results

def matpow(M, n):
    """Return the matrix power M**n built as a chain of tf.matmul ops.

    Any n <= 1 returns M unchanged (no identity matrix is constructed),
    so matpow(M, 1) == M and degenerate n < 1 also yields M.
    """
    # Base case at n <= 1: the original `n < 1` test off-by-one'd the
    # recursion and computed M**(n+1) instead of the documented M**n.
    if n <= 1:
        return M
    return tf.matmul(M, matpow(M, n - 1))

# Fully qualified single-process device names: ops pinned to local CPU 0.
with tf.device('/job:localhost/replica:0/task:0/cpu:0'):
    a = tf.placeholder(tf.float32, [10000, 10000])
    b = tf.placeholder(tf.float32, [10000, 10000])
    c1.append(matpow(a, n))
    c1.append(matpow(b, n))

# cpu:1 may not exist; allow_soft_placement below lets TF relocate the op.
# NOTE(review): `sum` shadows the builtin; harmless here but worth renaming.
with tf.device('/job:localhost/replica:0/task:0/cpu:1'):
    sum = tf.add_n(c1) 
    print(sum)

# Time the full run (graph execution happens inside sess.run).
t1_1 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto\
                 (allow_soft_placement=True,\
                log_device_placement=log_device_placement))\
                  as sess:
     sess.run(sum, {a:A, b:B})
t2_1 = datetime.datetime.now()


================================================
FILE: Chapter07/Python 3.5/gpu_computing_with_multiple_GPU.py
================================================
import numpy as np
import tensorflow as tf
import datetime

log_device_placement = True  # log each op's device when the session starts
n = 10                       # matrix power exponent

# Two random 10000x10000 float32 operand matrices.
A = np.random.rand(10000, 10000).astype('float32')
B = np.random.rand(10000, 10000).astype('float32')

# Collects the per-device partial results (one matrix power per GPU).
c1 = []

def matpow(M, n):
    """Return the matrix power M**n built as a chain of tf.matmul ops.

    Any n <= 1 returns M unchanged (no identity matrix is constructed),
    so matpow(M, 1) == M and degenerate n < 1 also yields M.
    """
    # Base case at n <= 1: the original `n < 1` test off-by-one'd the
    # recursion and computed M**(n+1) instead of the documented M**n.
    if n <= 1:
        return M
    return tf.matmul(M, matpow(M, n - 1))

#FIRST GPU: computes A^n
with tf.device('/gpu:0'):
    a = tf.placeholder(tf.float32, [10000, 10000])
    c1.append(matpow(a, n))

#SECOND GPU: computes B^n
with tf.device('/gpu:1'):
    b = tf.placeholder(tf.float32, [10000, 10000])
    c1.append(matpow(b, n))


# Sum the two partial results on the CPU.
# NOTE(review): `sum` shadows the builtin; harmless here but worth renaming.
with tf.device('/cpu:0'):
    sum = tf.add_n(c1) 
    print(sum)

# Time the full run; soft placement lets TF fall back when a device is missing.
t1_1 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=log_device_placement)) as sess:
     sess.run(sum, {a:A, b:B})

t2_1 = datetime.datetime.now()


================================================
FILE: Chapter07/Python 3.5/gpu_example.py
================================================
import numpy as np
import tensorflow as tf
import datetime

log_device_placement = True  # log each op's device when the session starts
n = 10                       # matrix power exponent
# Two random 10000x10000 float32 operand matrices.
A = np.random.rand(10000, 10000).astype('float32')
B = np.random.rand(10000, 10000).astype('float32')
c1 = []  # partial results placed on the GPU
c2 = []  # NOTE(review): unused

def matpow(M, n):
    """Return the matrix power M**n built as a chain of tf.matmul ops.

    Any n <= 1 returns M unchanged (no identity matrix is constructed),
    so matpow(M, 1) == M and degenerate n < 1 also yields M.
    """
    # Base case at n <= 1: the original `n < 1` test off-by-one'd the
    # recursion and computed M**(n+1) instead of the documented M**n.
    if n <= 1:
        return M
    return tf.matmul(M, matpow(M, n - 1))

# Both matrix powers are placed on the single GPU.
with tf.device('/gpu:0'): # For CPU use /cpu:0
    a = tf.placeholder(tf.float32, [10000, 10000])
    b = tf.placeholder(tf.float32, [10000, 10000])
    c1.append(matpow(a, n))
    c1.append(matpow(b, n))

# If the below code does not work use '/job:localhost/replica:0/task:0/cpu:0' as the GPU device
# NOTE(review): `sum` shadows the builtin; harmless here but worth renaming.
with tf.device('/cpu:0'):
  sum = tf.add_n(c1) #Addition of all elements in c1, i.e. A^n + B^n

# Time the full run (graph execution happens inside sess.run).
t1_1 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
     sess.run(sum, {a:A, b:B})

t2_1 = datetime.datetime.now()


================================================
FILE: Chapter07/Python 3.5/gpu_soft_placemnet_1.py
================================================
import numpy as np
import tensorflow as tf
import datetime

log_device_placement = True  # log each op's device when the session starts
n = 10                       # matrix power exponent

# Two random 10000x10000 float32 operand matrices.
A = np.random.rand(10000, 10000).astype('float32')
B = np.random.rand(10000, 10000).astype('float32')

c1 = []  # collects the two matrix-power results

def matpow(M, n):
    """Return the matrix power M**n built as a chain of tf.matmul ops.

    Any n <= 1 returns M unchanged (no identity matrix is constructed),
    so matpow(M, 1) == M and degenerate n < 1 also yields M.
    """
    # Base case at n <= 1: the original `n < 1` test off-by-one'd the
    # recursion and computed M**(n+1) instead of the documented M**n.
    if n <= 1:
        return M
    return tf.matmul(M, matpow(M, n - 1))

# Device strings need the leading slash ('/gpu:0'); the bare 'gpu:0' form
# in the original was inconsistent with every other script in this chapter.
with tf.device('/gpu:0'): # for CPU only, use /cpu:0
    a = tf.placeholder(tf.float32, [10000, 10000])
    b = tf.placeholder(tf.float32, [10000, 10000])
    c1.append(matpow(a, n))
    c1.append(matpow(b, n))

# gpu:1 may not exist; allow_soft_placement below lets TF relocate the op.
# NOTE(review): `sum` shadows the builtin; harmless here but worth renaming.
with tf.device('/gpu:1'): # for CPU only, use /cpu:0
    sum = tf.add_n(c1) 
    print(sum)

# Time the full run (graph execution happens inside sess.run).
t1_1 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=log_device_placement)) as sess:
     sess.run(sum, {a:A, b:B})

t2_1 = datetime.datetime.now()


================================================
FILE: Chapter08/Python 2.7/digit_classifier.py
================================================
from six.moves import xrange  
import tensorflow as tf
import prettytensor as pt
from prettytensor.tutorial import data_utils

# Command-line flags: checkpoint directory and model variant.
tf.app.flags.DEFINE_string('save_path', None, 'Where to save the model checkpoints.')
FLAGS = tf.app.flags.FLAGS

BATCH_SIZE = 50
# 60k training / 10k test MNIST images -> batches per epoch / per test pass.
EPOCH_SIZE = 60000 // BATCH_SIZE
TEST_SIZE = 10000 // BATCH_SIZE

tf.app.flags.DEFINE_string('model', 'full','Choose one of the models, either full or conv')
FLAGS = tf.app.flags.FLAGS
def multilayer_fully_connected(images, labels):
    """Two 100-unit ReLU fully connected layers plus a 10-way softmax
    classifier, built as a PrettyTensor chain.

    The original body left the `return` at the same indentation as the
    `with` statement (IndentationError); reformatted to standard indents.
    """
    images = pt.wrap(images)
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        return (images.flatten()
                .fully_connected(100)
                .fully_connected(100)
                .softmax_classifier(10, labels))

def lenet5(images, labels):
    """LeNet-5-style network: two conv/max-pool stages, a 500-unit fully
    connected layer, then a 10-way softmax classifier.

    The original body left the `return` at the same indentation as the
    `with` statement (IndentationError); reformatted to standard indents.
    """
    images = pt.wrap(images)
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        return (images.conv2d(5, 20)
                .max_pool(2, 2)
                .conv2d(5, 50)
                .max_pool(2, 2)
                .flatten()
                .fully_connected(500)
                .softmax_classifier(10, labels))

def main(_=None):
  """Entry point for tf.app.run(); the graph/training code runs at module level."""
  image_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 28, 28, 1])
  labels_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 10])

# NOTE(review): the code below runs at import time and needs the placeholders,
# which the original created only inside main() (NameError). They are created
# at module level here, mirroring the Python 3.5 version of this script.
image_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 28, 28, 1])
labels_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 10])

# Pick the network. The original if/elif chain had inconsistent indentation
# (SyntaxError) and the final print calls used curly quotes.
if FLAGS.model == 'full':
    result = multilayer_fully_connected(image_placeholder, labels_placeholder)
elif FLAGS.model == 'conv':
    result = lenet5(image_placeholder, labels_placeholder)
else:
    raise ValueError('model must be full or conv: %s' % FLAGS.model)

# Accuracy op, evaluated in test phase (no dropout/train-only behavior).
accuracy = result.softmax.evaluate_classifier(labels_placeholder, phase=pt.Phase.test)

train_images, train_labels = data_utils.mnist(training=True)
test_images, test_labels = data_utils.mnist(training=False)
optimizer = tf.train.GradientDescentOptimizer(0.01)
train_op = pt.apply_optimizer(optimizer, losses=[result.loss])
runner = pt.train.Runner(save_path=FLAGS.save_path)


with tf.Session():
    for epoch in xrange(10):
        # Shuffle the training data every epoch.
        train_images, train_labels = data_utils.permute_data((train_images, train_labels))

        runner.train_model(train_op, result.loss, EPOCH_SIZE,
                           feed_vars=(image_placeholder, labels_placeholder),
                           feed_data=pt.train.feed_numpy(BATCH_SIZE, train_images, train_labels),
                           print_every=100)
        classification_accuracy = runner.evaluate_model(accuracy, TEST_SIZE,
                                                        feed_vars=(image_placeholder, labels_placeholder),
                                                        feed_data=pt.train.feed_numpy(BATCH_SIZE, test_images, test_labels))
        print('epoch', epoch + 1)
        print('accuracy', classification_accuracy)

if __name__ == '__main__':
  tf.app.run()



================================================
FILE: Chapter08/Python 2.7/keras_movie_classifier_1.py
================================================
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence

# fix random seed for reproducibility
numpy.random.seed(7)

# load the dataset but only keep the top n words, zero the rest
# NOTE(review): `nb_words` (and `nb_epoch` below) are Keras 1 keyword names;
# Keras 2 renamed them to `num_words` / `epochs` — confirm the target version.
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words)
# truncate and pad input sequences
max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)

# create the model: embedding -> LSTM(100) -> single sigmoid output
embedding_vecor_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vecor_length,\
                       input_length=max_review_length))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',\
                 optimizer='adam',\
                   metrics=['accuracy'])
print(model.summary())

model.fit(X_train, y_train,\
     validation_data=(X_test, y_test),\
           nb_epoch=3, batch_size=64)

# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)

# scores = [loss, accuracy]
print("Accuracy: %.2f%%" % (scores[1]*100))


================================================
FILE: Chapter08/Python 2.7/keras_movie_classifier_using_convLayer_1.py
================================================
from __future__ import print_function

import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.layers import Conv1D, GlobalMaxPooling1D

# fix random seed for the reproducibility
numpy.random.seed(7)

# load the dataset but only keep the top n words, zero the rest
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
# truncate and pad input sequences
max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)

# create the model
embedding_vector_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=max_review_length))
# `filters` is the Conv1D keyword; the original `num_filter` raises a TypeError.
model.add(Conv1D(filters=32, kernel_size=3, padding="same", activation="relu"))
# NOTE(review): the original inserted GlobalMaxPooling1D here, which collapses
# the output to 2-D and makes the LSTM stack below fail (LSTMs need 3-D input);
# the LSTMs now consume the 3-D conv output directly. The legacy `input_dim=64`
# kwarg was dropped as well — it contradicted the actual 32-feature input.
model.add(LSTM(32, return_sequences=True))
model.add(LSTM(24, return_sequences=True))
model.add(LSTM(1,  return_sequences=False))

# Single sigmoid unit to match the scalar 0/1 labels and binary_crossentropy
# (the original Dense(2) mismatched the label shape).
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())

# `epochs` is the Model.fit keyword; `num_epoch` is not accepted.
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=64)

# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)

print("Accuracy: %.2f%%" % (scores[1]*100))


================================================
FILE: Chapter08/Python 2.7/pretty_tensor_digit_1.py
================================================
import tensorflow as tf
import prettytensor as pt
from prettytensor.tutorial import data_utils

# Command-line flags: checkpoint directory and model variant.
tf.app.flags.DEFINE_string('save_path', None, 'Where to save the model checkpoints.')
FLAGS = tf.app.flags.FLAGS

BATCH_SIZE = 50
# 60k training / 10k test MNIST images -> batches per epoch / per test pass.
EPOCH_SIZE = 60000 // BATCH_SIZE
TEST_SIZE = 10000 // BATCH_SIZE

tf.app.flags.DEFINE_string('model', 'full','Choose one of the models, either full or conv')
FLAGS = tf.app.flags.FLAGS
def multilayer_fully_connected(images, labels):
    """Two 100-unit ReLU fully connected layers + 10-way softmax classifier.

    Reformatted: the original body mixed spaces and a tab (ambiguous under
    `python -tt` and Python 3) and was indented to column 27.
    """
    images = pt.wrap(images)
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        return (images.flatten().fully_connected(100).fully_connected(100).softmax_classifier(10, labels))


def lenet5(images, labels):
    """LeNet-5-style conv net: two conv/max-pool stages, one 500-unit fully
    connected layer, then a 10-way softmax classifier.

    Reformatted: the original return line mixed spaces and a tab (ambiguous
    under `python -tt` and Python 3).
    """
    images = pt.wrap(images)
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        return (images.conv2d(5, 20).max_pool(2, 2).conv2d(5, 50).max_pool(2, 2).flatten().fully_connected(500).softmax_classifier(10, labels))


def main(_=None):
  """Entry point for tf.app.run(); the graph/training code runs at module level."""
  image_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 28, 28, 1])
  labels_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 10])

# NOTE(review): the code below runs at import time and needs the placeholders,
# which the original created only inside main() (NameError). They are created
# at module level here, mirroring the Python 3.5 version of this script.
image_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 28, 28, 1])
labels_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 10])

if FLAGS.model == 'full':
    result = multilayer_fully_connected(image_placeholder, labels_placeholder)
elif FLAGS.model == 'conv':
    # Reformatted: the original line mixed spaces and a tab.
    result = lenet5(image_placeholder, labels_placeholder)
else:
    raise ValueError('model must be full or conv: %s' % FLAGS.model)

# Accuracy op, evaluated in test phase (no dropout/train-only behavior).
accuracy = result.softmax.evaluate_classifier(labels_placeholder, phase=pt.Phase.test)

train_images, train_labels = data_utils.mnist(training=True)
test_images, test_labels = data_utils.mnist(training=False)
optimizer = tf.train.GradientDescentOptimizer(0.01)
train_op = pt.apply_optimizer(optimizer, losses=[result.loss])
runner = pt.train.Runner(save_path=FLAGS.save_path)


with tf.Session():
    for epoch in xrange(10):
        # Shuffle the training data every epoch.
        train_images, train_labels = data_utils.permute_data((train_images, train_labels))

        runner.train_model(train_op, result.loss, EPOCH_SIZE,
                           feed_vars=(image_placeholder, labels_placeholder),
                           feed_data=pt.train.feed_numpy(BATCH_SIZE, train_images, train_labels),
                           print_every=100)
        classification_accuracy = runner.evaluate_model(accuracy, TEST_SIZE,
                                                        feed_vars=(image_placeholder, labels_placeholder),
                                                        feed_data=pt.train.feed_numpy(BATCH_SIZE, test_images, test_labels))
        # Originally indented with an ambiguous space+tab mix.
        print('epoch', epoch + 1)
        print('accuracy', classification_accuracy)

if __name__ == '__main__':
  tf.app.run()


================================================
FILE: Chapter08/Python 2.7/tflearn_titanic_classifier.py
================================================
# NOTE(review): the original script used np.array and tflearn.* below without
# importing either module, which raises NameError at runtime.
import numpy as np
import tflearn
from tflearn.datasets import titanic
# Fetch the Titanic CSV next to the script (no-op if already present).
titanic.download_dataset('titanic_dataset.csv')
from tflearn.data_utils import load_csv
# Column 0 (survived) is the label, one-hot encoded over 2 classes.
data, labels = load_csv('titanic_dataset.csv', target_column=0,
                        categorical_labels=True, n_classes=2)

def preprocess(data, columns_to_ignore):
    """Drop the ignored columns in place and binarize the sex column.

    data: list of row lists as produced by load_csv (string fields).
    columns_to_ignore: column indices to delete from every row.
    Returns a float32 numpy array; column 1 becomes 1.0 for 'female',
    0.0 otherwise.
    """
    # Delete from the highest index down so earlier pops don't shift
    # the remaining indices. A plain loop replaces the original
    # side-effect list comprehension, and `col` no longer shadows
    # the builtin `id`.
    for col in sorted(columns_to_ignore, reverse=True):
        for row in data:
            row.pop(col)
    for i in range(len(data)):
        data[i][1] = 1. if data[i][1] == 'female' else 0.
    return np.array(data, dtype=np.float32)

# Ignore the name (col 1) and ticket (col 6) fields.
to_ignore=[1, 6]
data = preprocess(data, to_ignore)
# 6 remaining numeric features per passenger.
net = tflearn.input_data(shape=[None, 6])

# Two 32-unit hidden layers, then a 2-way softmax (survived / not survived).
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net)
model = tflearn.DNN(net)
model.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)


================================================
FILE: Chapter08/Python 3.5/__init__.py
================================================


================================================
FILE: Chapter08/Python 3.5/digit_classifier.py
================================================
from six.moves import range  
import tensorflow as tf
import prettytensor as pt
from prettytensor.tutorial import data_utils

# Command-line flags: checkpoint directory and model variant.
tf.app.flags.DEFINE_string('save_path', None, 'Where to save the model checkpoints.')
FLAGS = tf.app.flags.FLAGS

BATCH_SIZE = 50
# 60k training / 10k test MNIST images -> batches per epoch / per test pass.
EPOCH_SIZE = 60000 // BATCH_SIZE
TEST_SIZE = 10000 // BATCH_SIZE

# Module-level placeholders used by the training code below.
image_placeholder = tf.placeholder\
                      (tf.float32, [BATCH_SIZE, 28, 28, 1])
labels_placeholder = tf.placeholder\
                       (tf.float32, [BATCH_SIZE, 10])

tf.app.flags.DEFINE_string('model', 'full','Choose one of the models, either full or conv')
FLAGS = tf.app.flags.FLAGS
def multilayer_fully_connected(images, labels):
    """Flatten the images and apply two 100-unit ReLU fully connected
    layers followed by a 10-way softmax classifier (PrettyTensor chain)."""
    wrapped = pt.wrap(images)
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        chain = wrapped.flatten()
        chain = chain.fully_connected(100)
        chain = chain.fully_connected(100)
        return chain.softmax_classifier(10, labels)

def lenet5(images, labels):
    """LeNet-5-style network: two conv/max-pool stages, a 500-unit fully
    connected layer, then a 10-way softmax classifier."""
    wrapped = pt.wrap(images)
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        net = wrapped.conv2d(5, 20).max_pool(2, 2)
        net = net.conv2d(5, 50).max_pool(2, 2)
        return net.flatten().fully_connected(500).softmax_classifier(10, labels)

def main(_=None):
  """Entry point for tf.app.run().

  NOTE(review): these placeholders shadow the module-level ones; the
  training code below runs at import time and uses the globals instead.
  """
  image_placeholder = tf.placeholder\
                      (tf.float32, [BATCH_SIZE, 28, 28, 1])
  labels_placeholder = tf.placeholder\
                       (tf.float32, [BATCH_SIZE, 10])

# Select the network from the --model flag.
if FLAGS.model == 'full':
    result = multilayer_fully_connected(image_placeholder, labels_placeholder)
elif FLAGS.model == 'conv':
    result = lenet5(image_placeholder, labels_placeholder)
else:
    raise ValueError('model must be full or conv: %s' % FLAGS.model)

# Accuracy op, evaluated in test phase (no train-only behavior).
accuracy = result.softmax.evaluate_classifier(labels_placeholder,phase=pt.Phase.test)

train_images, train_labels = data_utils.mnist(training=True)
test_images, test_labels = data_utils.mnist(training=False)
optimizer = tf.train.GradientDescentOptimizer(0.01)
train_op = pt.apply_optimizer(optimizer,losses=[result.loss])
# Runner handles the train/eval loops and (optionally) checkpointing.
runner = pt.train.Runner(save_path=FLAGS.save_path)


with tf.Session():
    for epoch in range(10):
        # Shuffle the training data every epoch.
        train_images, train_labels = \
                      data_utils.permute_data\
                      ((train_images, train_labels))

        runner.train_model(train_op,result.\
                           loss,EPOCH_SIZE,\
                           feed_vars=(image_placeholder,\
                                      labels_placeholder),\
                           feed_data=pt.train.\
                           feed_numpy(BATCH_SIZE,\
                                      train_images,\
                                      train_labels),\
                           print_every=100)
        classification_accuracy = runner.evaluate_model\
                                  (accuracy,\
                                   TEST_SIZE,\
                                   feed_vars=(image_placeholder,\
                                              labels_placeholder),\
                                   feed_data=pt.train.\
                                   feed_numpy(BATCH_SIZE,\
                                              test_images,\
                                              test_labels))

# Runs once after the final epoch (epoch is left at its last loop value).
print('epoch' , epoch + 1)
print('accuracy', classification_accuracy)

if __name__ == '__main__':
  tf.app.run()



================================================
FILE: Chapter08/Python 3.5/keras_movie_classifier_1.py
================================================
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence

# fix random seed for reproducibility
numpy.random.seed(7)

# load the dataset but only keep the top n words, zero the rest
top_words = 5000
# `num_words` is the Keras 2 keyword; the rest of this script already uses
# the Keras 2 API (epochs=), so the Keras 1 `nb_words` name was updated.
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
# truncate and pad input sequences
max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)

# create the model: embedding -> LSTM(100) -> single sigmoid output
embedding_vector_length = 32  # fixed local-name typo ("vecor")
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length,
                    input_length=max_review_length))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
print(model.summary())

model.fit(X_train, y_train,
          validation_data=(X_test, y_test),
          epochs=3, batch_size=64)

# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)

# scores = [loss, accuracy]
print("Accuracy: %.2f%%" % (scores[1]*100))


================================================
FILE: Chapter08/Python 3.5/keras_movie_classifier_using_convLayer_1.py
================================================
from __future__ import print_function

import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.layers import Conv1D, GlobalMaxPooling1D

# fix random seed for the reproducibility
numpy.random.seed(7)

# load the dataset but only keep the top n words, zero the rest
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
# truncate and pad input sequences
max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)

# create the model
embedding_vector_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=max_review_length))
# `filters` is the Conv1D keyword; the original `num_filter` raises a TypeError.
model.add(Conv1D(filters=32, kernel_size=3, padding="same", activation="relu"))
# NOTE(review): the original inserted GlobalMaxPooling1D here, which collapses
# the output to 2-D and makes the LSTM stack below fail (LSTMs need 3-D input);
# the LSTMs now consume the 3-D conv output directly. The legacy `input_dim=64`
# kwarg was dropped as well — it contradicted the actual 32-feature input.
model.add(LSTM(32, return_sequences=True))
model.add(LSTM(24, return_sequences=True))
model.add(LSTM(1,  return_sequences=False))

# Single sigmoid unit to match the scalar 0/1 labels and binary_crossentropy
# (the original Dense(2) mismatched the label shape).
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())

# `epochs` is the Model.fit keyword; `num_epoch` is not accepted.
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=64)

# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)

print("Accuracy: %.2f%%" % (scores[1]*100))


================================================
FILE: Chapter08/Python 3.5/pretty_tensor_digit_1.py
================================================
import tensorflow as tf
import prettytensor as pt
from prettytensor.tutorial import data_utils

# Command-line flags: checkpoint directory and model variant.
tf.app.flags.DEFINE_string('save_path', None, 'Where to save the model checkpoints.')
FLAGS = tf.app.flags.FLAGS

BATCH_SIZE = 50
# 60k training / 10k test MNIST images -> batches per epoch / per test pass.
EPOCH_SIZE = 60000 // BATCH_SIZE
TEST_SIZE = 10000 // BATCH_SIZE

# Module-level placeholders used by the training code below.
image_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 28, 28, 1])
labels_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 10])

tf.app.flags.DEFINE_string('model', 'full','Choose one of the models, either full or conv')
FLAGS = tf.app.flags.FLAGS
def multilayer_fully_connected(images, labels):
    """Two 100-unit ReLU fully connected layers + 10-way softmax classifier.

    Reformatted: the original body mixed spaces and a tab in its indentation
    (fragile, PEP 8 violation) and was indented to column 27.
    """
    images = pt.wrap(images)
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        return (images.flatten().fully_connected(100).fully_connected(100).softmax_classifier(10, labels))


def lenet5(images, labels):
    """LeNet-5-style conv net: two conv/max-pool stages, one 500-unit fully
    connected layer, then a 10-way softmax classifier.

    Reformatted: the original return line mixed spaces and a tab in its
    indentation (fragile, PEP 8 violation).
    """
    images = pt.wrap(images)
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        return (images.conv2d(5, 20).max_pool(2, 2).conv2d(5, 50).max_pool(2, 2).flatten().fully_connected(500).softmax_classifier(10, labels))


def main(_=None):
  """Entry point for tf.app.run().

  NOTE(review): these placeholders shadow the module-level ones; the
  training code below runs at import time and uses the globals instead.
  """
  image_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 28, 28, 1])
  labels_placeholder = tf.placeholder(tf.float32, [BATCH_SIZE, 10])

# Select the network from the --model flag.
if FLAGS.model == 'full':
    result = multilayer_fully_connected(image_placeholder, labels_placeholder)
elif FLAGS.model == 'conv':
    # Reformatted: the original line was indented with a space+tab mix.
    result = lenet5(image_placeholder, labels_placeholder)
else:
    raise ValueError('model must be full or conv: %s' % FLAGS.model)

# Accuracy op, evaluated in test phase (no train-only behavior).
accuracy = result.softmax.evaluate_classifier(labels_placeholder, phase=pt.Phase.test)

train_images, train_labels = data_utils.mnist(training=True)
test_images, test_labels = data_utils.mnist(training=False)
optimizer = tf.train.GradientDescentOptimizer(0.01)
train_op = pt.apply_optimizer(optimizer, losses=[result.loss])
runner = pt.train.Runner(save_path=FLAGS.save_path)


with tf.Session():
    for epoch in range(10):
        # Shuffle the training data every epoch.
        train_images, train_labels = data_utils.permute_data((train_images, train_labels))

        runner.train_model(train_op, result.loss, EPOCH_SIZE,
                           feed_vars=(image_placeholder, labels_placeholder),
                           feed_data=pt.train.feed_numpy(BATCH_SIZE, train_images, train_labels),
                           print_every=100)
        classification_accuracy = runner.evaluate_model(accuracy, TEST_SIZE,
                                                        feed_vars=(image_placeholder, labels_placeholder),
                                                        feed_data=pt.train.feed_numpy(BATCH_SIZE, test_images, test_labels))

    # Runs once after the final epoch (unchanged from the original placement).
    print('epoch', epoch + 1)
    print('accuracy', classification_accuracy)

if __name__ == '__main__':
  tf.app.run()


================================================
FILE: Chapter08/Python 3.5/tflearn_titanic_classifier.py
================================================
# Train a small TFLearn DNN on the Titanic survival dataset.
# All imports grouped at the top (PEP 8) instead of interleaved with code.
import numpy as np
import tflearn
from tflearn.data_utils import load_csv
from tflearn.datasets import titanic

# Fetch the dataset to a local CSV file before parsing it.
titanic.download_dataset('titanic_dataset.csv')

# Column 0 ("survived") is the target, one-hot encoded into 2 classes.
data, labels = load_csv('titanic_dataset.csv', target_column=0,
                        categorical_labels=True, n_classes=2)

def preprocess(data, columns_to_ignore):
    """Drop unwanted columns and numerically encode the 'sex' field.

    Args:
        data: list of row lists (as returned by ``load_csv``); rows are
            mutated in place by the column removal.
        columns_to_ignore: iterable of column indices to delete from
            every row.

    Returns:
        ``np.ndarray`` of dtype float32 containing the remaining columns,
        with the field at index 1 (the 'sex' column after removal) mapped
        to 1.0 for 'female' and 0.0 otherwise.
    """
    # Remove the highest indices first so lower indices remain valid.
    # (Plain loops instead of a side-effect list comprehension, and no
    # shadowing of the builtin `id`.)
    for column_index in sorted(columns_to_ignore, reverse=True):
        for row in data:
            row.pop(column_index)
    # After removal, 'sex' sits at index 1; convert it to a float flag.
    for row in data:
        row[1] = 1. if row[1] == 'female' else 0.
    return np.array(data, dtype=np.float32)

# Drop the passenger name (column 1) and ticket number (column 6).
ignored_columns = [1, 6]
data = preprocess(data, ignored_columns)

# Network: 6 input features -> two 32-unit layers -> 2-way softmax.
network = tflearn.input_data(shape=[None, 6])
network = tflearn.fully_connected(network, 32)
network = tflearn.fully_connected(network, 32)
network = tflearn.fully_connected(network, 2, activation='softmax')
network = tflearn.regression(network)

# Wrap the graph in a trainable DNN and fit it.
classifier = tflearn.DNN(network)
classifier.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)

# Evaluate the model on the same data it was trained on.
accuracy = classifier.evaluate(data, labels, batch_size=16)
print('Accuracy: ', accuracy)


================================================
FILE: Chapter08/data/titanic_dataset.csv
================================================
survived,pclass,name,sex,age,sibsp,parch,ticket,fare
1,1,"Allen, Miss. Elisabeth Walton",female,29,0,0,24160,211.3375
1,1,"Allison, Master. Hudson Trevor",male,0.9167,1,2,113781,151.5500
0,1,"Allison, Miss. Helen Loraine",female,2,1,2,113781,151.5500
0,1,"Allison, Mr. Hudson Joshua Creighton",male,30,1,2,113781,151.5500
0,1,"Allison, Mrs. Hudson J C (Bessie Waldo Daniels)",female,25,1,2,113781,151.5500
1,1,"Anderson, Mr. Harry",male,48,0,0,19952,26.5500
1,1,"Andrews, Miss. Kornelia Theodosia",female,63,1,0,13502,77.9583
0,1,"Andrews, Mr. Thomas Jr",male,39,0,0,112050,0.0000
1,1,"Appleton, Mrs. Edward Dale (Charlotte Lamson)",female,53,2,0,11769,51.4792
0,1,"Artagaveytia, Mr. Ramon",male,71,0,0,PC 17609,49.5042
0,1,"Astor, Col. John Jacob",male,47,1,0,PC 17757,227.5250
1,1,"Astor, Mrs. John Jacob (Madeleine Talmadge Force)",female,18,1,0,PC 17757,227.5250
1,1,"Aubart, Mme. Leontine Pauline",female,24,0,0,PC 17477,69.3000
1,1,"Barber, Miss. Ellen ""Nellie""",female,26,0,0,19877,78.8500
1,1,"Barkworth, Mr. Algernon Henry Wilson",male,80,0,0,27042,30.0000
0,1,"Baumann, Mr. John D",male,0,0,0,PC 17318,25.9250
0,1,"Baxter, Mr. Quigg Edmond",male,24,0,1,PC 17558,247.5208
1,1,"Baxter, Mrs. James (Helene DeLaudeniere Chaput)",female,50,0,1,PC 17558,247.5208
1,1,"Bazzani, Miss. Albina",female,32,0,0,11813,76.2917
0,1,"Beattie, Mr. Thomson",male,36,0,0,13050,75.2417
1,1,"Beckwith, Mr. Richard Leonard",male,37,1,1,11751,52.5542
1,1,"Beckwith, Mrs. Richard Leonard (Sallie Monypeny)",female,47,1,1,11751,52.5542
1,1,"Behr, Mr. Karl Howell",male,26,0,0,111369,30.0000
1,1,"Bidois, Miss. Rosalie",female,42,0,0,PC 17757,227.5250
1,1,"Bird, Miss. Ellen",female,29,0,0,PC 17483,221.7792
0,1,"Birnbaum, Mr. Jakob",male,25,0,0,13905,26.0000
1,1,"Bishop, Mr. Dickinson H",male,25,1,0,11967,91.0792
1,1,"Bishop, Mrs. Dickinson H (Helen Walton)",female,19,1,0,11967,91.0792
1,1,"Bissette, Miss. Amelia",female,35,0,0,PC 17760,135.6333
1,1,"Bjornstrom-Steffansson, Mr. Mauritz Hakan",male,28,0,0,110564,26.5500
0,1,"Blackwell, Mr. Stephen Weart",male,45,0,0,113784,35.5000
1,1,"Blank, Mr. Henry",male,40,0,0,112277,31.0000
1,1,"Bonnell, Miss. Caroline",female,30,0,0,36928,164.8667
1,1,"Bonnell, Miss. Elizabeth",female,58,0,0,113783,26.5500
0,1,"Borebank, Mr. John James",male,42,0,0,110489,26.5500
1,1,"Bowen, Miss. Grace Scott",female,45,0,0,PC 17608,262.3750
1,1,"Bowerman, Miss. Elsie Edith",female,22,0,1,113505,55.0000
1,1,"Bradley, Mr. George (""George Arthur Brayton"")",male,0,0,0,111427,26.5500
0,1,"Brady, Mr. John Bertram",male,41,0,0,113054,30.5000
0,1,"Brandeis, Mr. Emil",male,48,0,0,PC 17591,50.4958
0,1,"Brewe, Dr. Arthur Jackson",male,0,0,0,112379,39.6000
1,1,"Brown, Mrs. James Joseph (Margaret Tobin)",female,44,0,0,PC 17610,27.7208
1,1,"Brown, Mrs. John Murray (Caroline Lane Lamson)",female,59,2,0,11769,51.4792
1,1,"Bucknell, Mrs. William Robert (Emma Eliza Ward)",female,60,0,0,11813,76.2917
1,1,"Burns, Miss. Elizabeth Margaret",female,41,0,0,16966,134.5000
0,1,"Butt, Major. Archibald Willingham",male,45,0,0,113050,26.5500
0,1,"Cairns, Mr. Alexander",male,0,0,0,113798,31.0000
1,1,"Calderhead, Mr. Edward Pennington",male,42,0,0,PC 17476,26.2875
1,1,"Candee, Mrs. Edward (Helen Churchill Hungerford)",female,53,0,0,PC 17606,27.4458
1,1,"Cardeza, Mr. Thomas Drake Martinez",male,36,0,1,PC 17755,512.3292
1,1,"Cardeza, Mrs. James Warburton Martinez (Charlotte Wardle Drake)",female,58,0,1,PC 17755,512.3292
0,1,"Carlsson, Mr. Frans Olof",male,33,0,0,695,5.0000
0,1,"Carrau, Mr. Francisco M",male,28,0,0,113059,47.1000
0,1,"Carrau, Mr. Jose Pedro",male,17,0,0,113059,47.1000
1,1,"Carter, Master. William Thornton II",male,11,1,2,113760,120.0000
1,1,"Carter, Miss. Lucile Polk",female,14,1,2,113760,120.0000
1,1,"Carter, Mr. William Ernest",male,36,1,2,113760,120.0000
1,1,"Carter, Mrs. William Ernest (Lucile Polk)",female,36,1,2,113760,120.0000
0,1,"Case, Mr. Howard Brown",male,49,0,0,19924,26.0000
1,1,"Cassebeer, Mrs. Henry Arthur Jr (Eleanor Genevieve Fosdick)",female,0,0,0,17770,27.7208
0,1,"Cavendish, Mr. Tyrell William",male,36,1,0,19877,78.8500
1,1,"Cavendish, Mrs. Tyrell William (Julia Florence Siegel)",female,76,1,0,19877,78.8500
0,1,"Chaffee, Mr. Herbert Fuller",male,46,1,0,W.E.P. 5734,61.1750
1,1,"Chaffee, Mrs. Herbert Fuller (Carrie Constance Toogood)",female,47,1,0,W.E.P. 5734,61.1750
1,1,"Chambers, Mr. Norman Campbell",male,27,1,0,113806,53.1000
1,1,"Chambers, Mrs. Norman Campbell (Bertha Griggs)",female,33,1,0,113806,53.1000
1,1,"Chaudanson, Miss. Victorine",female,36,0,0,PC 17608,262.3750
1,1,"Cherry, Miss. Gladys",female,30,0,0,110152,86.5000
1,1,"Chevre, Mr. Paul Romaine",male,45,0,0,PC 17594,29.7000
1,1,"Chibnall, Mrs. (Edith Martha Bowerman)",female,0,0,1,113505,55.0000
0,1,"Chisholm, Mr. Roderick Robert Crispin",male,0,0,0,112051,0.0000
0,1,"Clark, Mr. Walter Miller",male,27,1,0,13508,136.7792
1,1,"Clark, Mrs. Walter Miller (Virginia McDowell)",female,26,1,0,13508,136.7792
1,1,"Cleaver, Miss. Alice",female,22,0,0,113781,151.5500
0,1,"Clifford, Mr. George Quincy",male,0,0,0,110465,52.0000
0,1,"Colley, Mr. Edward Pomeroy",male,47,0,0,5727,25.5875
1,1,"Compton, Miss. Sara Rebecca",female,39,1,1,PC 17756,83.1583
0,1,"Compton, Mr. Alexander Taylor Jr",male,37,1,1,PC 17756,83.1583
1,1,"Compton, Mrs. Alexander Taylor (Mary Eliza Ingersoll)",female,64,0,2,PC 17756,83.1583
1,1,"Cornell, Mrs. Robert Clifford (Malvina Helen Lamson)",female,55,2,0,11770,25.7000
0,1,"Crafton, Mr. John Bertram",male,0,0,0,113791,26.5500
0,1,"Crosby, Capt. Edward Gifford",male,70,1,1,WE/P 5735,71.0000
1,1,"Crosby, Miss. Harriet R",female,36,0,2,WE/P 5735,71.0000
1,1,"Crosby, Mrs. Edward Gifford (Catherine Elizabeth Halstead)",female,64,1,1,112901,26.5500
0,1,"Cumings, Mr. John Bradley",male,39,1,0,PC 17599,71.2833
1,1,"Cumings, Mrs. John Bradley (Florence Briggs Thayer)",female,38,1,0,PC 17599,71.2833
1,1,"Daly, Mr. Peter Denis ",male,51,0,0,113055,26.5500
1,1,"Daniel, Mr. Robert Williams",male,27,0,0,113804,30.5000
1,1,"Daniels, Miss. Sarah",female,33,0,0,113781,151.5500
0,1,"Davidson, Mr. Thornton",male,31,1,0,F.C. 12750,52.0000
1,1,"Davidson, Mrs. Thornton (Orian Hays)",female,27,1,2,F.C. 12750,52.0000
1,1,"Dick, Mr. Albert Adrian",male,31,1,0,17474,57.0000
1,1,"Dick, Mrs. Albert Adrian (Vera Gillespie)",female,17,1,0,17474,57.0000
1,1,"Dodge, Dr. Washington",male,53,1,1,33638,81.8583
1,1,"Dodge, Master. Washington",male,4,0,2,33638,81.8583
1,1,"Dodge, Mrs. Washington (Ruth Vidaver)",female,54,1,1,33638,81.8583
0,1,"Douglas, Mr. Walter Donald",male,50,1,0,PC 17761,106.4250
1,1,"Douglas, Mrs. Frederick Charles (Mary Helene Baxter)",female,27,1,1,PC 17558,247.5208
1,1,"Douglas, Mrs. Walter Donald (Mahala Dutton)",female,48,1,0,PC 17761,106.4250
1,1,"Duff Gordon, Lady. (Lucille Christiana Sutherland) (""Mrs Morgan"")",female,48,1,0,11755,39.6000
1,1,"Duff Gordon, Sir. Cosmo Edmund (""Mr Morgan"")",male,49,1,0,PC 17485,56.9292
0,1,"Dulles, Mr. William Crothers",male,39,0,0,PC 17580,29.7000
1,1,"Earnshaw, Mrs. Boulton (Olive Potter)",female,23,0,1,11767,83.1583
1,1,"Endres, Miss. Caroline Louise",female,38,0,0,PC 17757,227.5250
1,1,"Eustis, Miss. Elizabeth Mussey",female,54,1,0,36947,78.2667
0,1,"Evans, Miss. Edith Corse",female,36,0,0,PC 17531,31.6792
0,1,"Farthing, Mr. John",male,0,0,0,PC 17483,221.7792
1,1,"Flegenheim, Mrs. Alfred (Antoinette)",female,0,0,0,PC 17598,31.6833
1,1,"Fleming, Miss. Margaret",female,0,0,0,17421,110.8833
1,1,"Flynn, Mr. John Irwin (""Irving"")",male,36,0,0,PC 17474,26.3875
0,1,"Foreman, Mr. Benjamin Laventall",male,30,0,0,113051,27.7500
1,1,"Fortune, Miss. Alice Elizabeth",female,24,3,2,19950,263.0000
1,1,"Fortune, Miss. Ethel Flora",female,28,3,2,19950,263.0000
1,1,"Fortune, Miss. Mabel Helen",female,23,3,2,19950,263.0000
0,1,"Fortune, Mr. Charles Alexander",male,19,3,2,19950,263.0000
0,1,"Fortune, Mr. Mark",male,64,1,4,19950,263.0000
1,1,"Fortune, Mrs. Mark (Mary McDougald)",female,60,1,4,19950,263.0000
1,1,"Francatelli, Miss. Laura Mabel",female,30,0,0,PC 17485,56.9292
0,1,"Franklin, Mr. Thomas Parham",male,0,0,0,113778,26.5500
1,1,"Frauenthal, Dr. Henry William",male,50,2,0,PC 17611,133.6500
1,1,"Frauenthal, Mr. Isaac Gerald",male,43,1,0,17765,27.7208
1,1,"Frauenthal, Mrs. Henry William (Clara Heinsheimer)",female,0,1,0,PC 17611,133.6500
1,1,"Frolicher, Miss. Hedwig Margaritha",female,22,0,2,13568,49.5000
1,1,"Frolicher-Stehli, Mr. Maxmillian",male,60,1,1,13567,79.2000
1,1,"Frolicher-Stehli, Mrs. Maxmillian (Margaretha Emerentia Stehli)",female,48,1,1,13567,79.2000
0,1,"Fry, Mr. Richard",male,0,0,0,112058,0.0000
0,1,"Futrelle, Mr. Jacques Heath",male,37,1,0,113803,53.1000
1,1,"Futrelle, Mrs. Jacques Heath (Lily May Peel)",female,35,1,0,113803,53.1000
0,1,"Gee, Mr. Arthur H",male,47,0,0,111320,38.5000
1,1,"Geiger, Miss. Amalie",female,35,0,0,113503,211.5000
1,1,"Gibson, Miss. Dorothy Winifred",female,22,0,1,112378,59.4000
1,1,"Gibson, Mrs. Leonard (Pauline C Boeson)",female,45,0,1,112378,59.4000
0,1,"Giglio, Mr. Victor",male,24,0,0,PC 17593,79.2000
1,1,"Goldenberg, Mr. Samuel L",male,49,1,0,17453,89.1042
1,1,"Goldenberg, Mrs. Samuel L (Edwiga Grabowska)",female,0,1,0,17453,89.1042
0,1,"Goldschmidt, Mr. George B",male,71,0,0,PC 17754,34.6542
1,1,"Gracie, Col. Archibald IV",male,53,0,0,113780,28.5000
1,1,"Graham, Miss. Margaret Edith",female,19,0,0,112053,30.0000
0,1,"Graham, Mr. George Edward",male,38,0,1,PC 17582,153.4625
1,1,"Graham, Mrs. William Thompson (Edith Junkins)",female,58,0,1,PC 17582,153.4625
1,1,"Greenfield, Mr. William Bertram",male,23,0,1,PC 17759,63.3583
1,1,"Greenfield, Mrs. Leo David (Blanche Strouse)",female,45,0,1,PC 17759,63.3583
0,1,"Guggenheim, Mr. Benjamin",male,46,0,0,PC 17593,79.2000
1,1,"Harder, Mr. George Achilles",male,25,1,0,11765,55.4417
1,1,"Harder, Mrs. George Achilles (Dorothy Annan)",female,25,1,0,11765,55.4417
1,1,"Harper, Mr. Henry Sleeper",male,48,1,0,PC 17572,76.7292
1,1,"Harper, Mrs. Henry Sleeper (Myna Haxtun)",female,49,1,0,PC 17572,76.7292
0,1,"Harrington, Mr. Charles H",male,0,0,0,113796,42.4000
0,1,"Harris, Mr. Henry Birkhardt",male,45,1,0,36973,83.4750
1,1,"Harris, Mrs. Henry Birkhardt (Irene Wallach)",female,35,1,0,36973,83.4750
0,1,"Harrison, Mr. William",male,40,0,0,112059,0.0000
1,1,"Hassab, Mr. Hammad",male,27,0,0,PC 17572,76.7292
1,1,"Hawksford, Mr. Walter James",male,0,0,0,16988,30.0000
1,1,"Hays, Miss. Margaret Bechstein",female,24,0,0,11767,83.1583
0,1,"Hays, Mr. Charles Melville",male,55,1,1,12749,93.5000
1,1,"Hays, Mrs. Charles Melville (Clara Jennings Gregg)",female,52,1,1,12749,93.5000
0,1,"Head, Mr. Christopher",male,42,0,0,113038,42.5000
0,1,"Hilliard, Mr. Herbert Henry",male,0,0,0,17463,51.8625
0,1,"Hipkins, Mr. William Edward",male,55,0,0,680,50.0000
1,1,"Hippach, Miss. Jean Gertrude",female,16,0,1,111361,57.9792
1,1,"Hippach, Mrs. Louis Albert (Ida Sophia Fischer)",female,44,0,1,111361,57.9792
1,1,"Hogeboom, Mrs. John C (Anna Andrews)",female,51,1,0,13502,77.9583
0,1,"Holverson, Mr. Alexander Oskar",male,42,1,0,113789,52.0000
1,1,"Holverson, Mrs. Alexander Oskar (Mary Aline Towner)",female,35,1,0,113789,52.0000
1,1,"Homer, Mr. Harry (""Mr E Haven"")",male,35,0,0,111426,26.5500
1,1,"Hoyt, Mr. Frederick Maxfield",male,38,1,0,19943,90.0000
0,1,"Hoyt, Mr. William Fisher",male,0,0,0,PC 17600,30.6958
1,1,"Hoyt, Mrs. Frederick Maxfield (Jane Anne Forby)",female,35,1,0,19943,90.0000
1,1,"Icard, Miss. Amelie",female,38,0,0,113572,80.0000
0,1,"Isham, Miss. Ann Elizabeth",female,50,0,0,PC 17595,28.7125
1,1,"Ismay, Mr. Joseph Bruce",male,49,0,0,112058,0.0000
0,1,"Jones, Mr. Charles Cresson",male,46,0,0,694,26.0000
0,1,"Julian, Mr. Henry Forbes",male,50,0,0,113044,26.0000
0,1,"Keeping, Mr. Edwin",male,32.5,0,0,113503,211.5000
0,1,"Kent, Mr. Edward Austin",male,58,0,0,11771,29.7000
0,1,"Kenyon, Mr. Frederick R",male,41,1,0,17464,51.8625
1,1,"Kenyon, Mrs. Frederick R (Marion)",female,0,1,0,17464,51.8625
1,1,"Kimball, Mr. Edwin Nelson Jr",male,42,1,0,11753,52.5542
1,1,"Kimball, Mrs. Edwin Nelson Jr (Gertrude Parsons)",female,45,1,0,11753,52.5542
0,1,"Klaber, Mr. Herman",male,0,0,0,113028,26.5500
1,1,"Kreuchen, Miss. Emilie",female,39,0,0,24160,211.3375
1,1,"Leader, Dr. Alice (Farnham)",female,49,0,0,17465,25.9292
1,1,"LeRoy, Miss. Bertha",female,30,0,0,PC 17761,106.4250
1,1,"Lesurer, Mr. Gustave J",male,35,0,0,PC 17755,512.3292
0,1,"Lewy, Mr. Ervin G",male,0,0,0,PC 17612,27.7208
0,1,"Lindeberg-Lind, Mr. Erik Gustaf (""Mr Edward Lingrey"")",male,42,0,0,17475,26.5500
1,1,"Lindstrom, Mrs. Carl Johan (Sigrid Posse)",female,55,0,0,112377,27.7208
1,1,"Lines, Miss. Mary Conover",female,16,0,1,PC 17592,39.4000
1,1,"Lines, Mrs. Ernest H (Elizabeth Lindsey James)",female,51,0,1,PC 17592,39.4000
0,1,"Long, Mr. Milton Clyde",male,29,0,0,113501,30.0000
1,1,"Longley, Miss. Gretchen Fiske",female,21,0,0,13502,77.9583
0,1,"Loring, Mr. Joseph Holland",male,30,0,0,113801,45.5000
1,1,"Lurette, Miss. Elise",female,58,0,0,PC 17569,146.5208
1,1,"Madill, Miss. Georgette Alexandra",female,15,0,1,24160,211.3375
0,1,"Maguire, Mr. John Edward",male,30,0,0,110469,26.0000
1,1,"Maioni, Miss. Roberta",female,16,0,0,110152,86.5000
1,1,"Marechal, Mr. Pierre",male,0,0,0,11774,29.7000
0,1,"Marvin, Mr. Daniel Warner",male,19,1,0,113773,53.1000
1,1,"Marvin, Mrs. Daniel Warner (Mary Graham Carmichael Farquarson)",female,18,1,0,113773,53.1000
1,1,"Mayne, Mlle. Berthe Antonine (""Mrs de Villiers"")",female,24,0,0,PC 17482,49.5042
0,1,"McCaffry, Mr. Thomas Francis",male,46,0,0,13050,75.2417
0,1,"McCarthy, Mr. Timothy J",male,54,0,0,17463,51.8625
1,1,"McGough, Mr. James Robert",male,36,0,0,PC 17473,26.2875
0,1,"Meyer, Mr. Edgar Joseph",male,28,1,0,PC 17604,82.1708
1,1,"Meyer, Mrs. Edgar Joseph (Leila Saks)",female,0,1,0,PC 17604,82.1708
0,1,"Millet, Mr. Francis Davis",male,65,0,0,13509,26.5500
0,1,"Minahan, Dr. William Edward",male,44,2,0,19928,90.0000
1,1,"Minahan, Miss. Daisy E",female,33,1,0,19928,90.0000
1,1,"Minahan, Mrs. William Edward (Lillian E Thorpe)",female,37,1,0,19928,90.0000
1,1,"Mock, Mr. Philipp Edmund",male,30,1,0,13236,57.7500
0,1,"Molson, Mr. Harry Markland",male,55,0,0,113787,30.5000
0,1,"Moore, Mr. Clarence Bloomfield",male,47,0,0,113796,42.4000
0,1,"Natsch, Mr. Charles H",male,37,0,1,PC 17596,29.7000
1,1,"Newell, Miss. Madeleine",female,31,1,0,35273,113.2750
1,1,"Newell, Miss. Marjorie",female,23,1,0,35273,113.2750
0,1,"Newell, Mr. Arthur Webster",male,58,0,2,35273,113.2750
1,1,"Newsom, Miss. Helen Monypeny",female,19,0,2,11752,26.2833
0,1,"Nicholson, Mr. Arthur Ernest",male,64,0,0,693,26.0000
1,1,"Oliva y Ocana, Dona. Fermina",female,39,0,0,PC 17758,108.9000
1,1,"Omont, Mr. Alfred Fernand",male,0,0,0,F.C. 12998,25.7417
1,1,"Ostby, Miss. Helene Ragnhild",female,22,0,1,113509,61.9792
0,1,"Ostby, Mr. Engelhart Cornelius",male,65,0,1,113509,61.9792
0,1,"Ovies y Rodriguez, Mr. Servando",male,28.5,0,0,PC 17562,27.7208
0,1,"Parr, Mr. William Henry Marsh",male,0,0,0,112052,0.0000
0,1,"Partner, Mr. Austen",male,45.5,0,0,113043,28.5000
0,1,"Payne, Mr. Vivian Ponsonby",male,23,0,0,12749,93.5000
0,1,"Pears, Mr. Thomas Clinton",male,29,1,0,113776,66.6000
1,1,"Pears, Mrs. Thomas (Edith Wearne)",female,22,1,0,113776,66.6000
0,1,"Penasco y Castellana, Mr. Victor de Satode",male,18,1,0,PC 17758,108.9000
1,1,"Penasco y Castellana, Mrs. Victor de Satode (Maria Josefa Perez de Soto y Vallejo)",female,17,1,0,PC 17758,108.9000
1,1,"Perreault, Miss. Anne",female,30,0,0,12749,93.5000
1,1,"Peuchen, Major. Arthur Godfrey",male,52,0,0,113786,30.5000
0,1,"Porter, Mr. Walter Chamberlain",male,47,0,0,110465,52.0000
1,1,"Potter, Mrs. Thomas Jr (Lily Alexenia Wilson)",female,56,0,1,11767,83.1583
0,1,"Reuchlin, Jonkheer. John George",male,38,0,0,19972,0.0000
1,1,"Rheims, Mr. George Alexander Lucien",male,0,0,0,PC 17607,39.6000
0,1,"Ringhini, Mr. Sante",male,22,0,0,PC 17760,135.6333
0,1,"Robbins, Mr. Victor",male,0,0,0,PC 17757,227.5250
1,1,"Robert, Mrs. Edward Scott (Elisabeth Walton McMillan)",female,43,0,1,24160,211.3375
0,1,"Roebling, Mr. Washington Augustus II",male,31,0,0,PC 17590,50.4958
1,1,"Romaine, Mr. Charles Hallace (""Mr C Rolmane"")",male,45,0,0,111428,26.5500
0,1,"Rood, Mr. Hugh Roscoe",male,0,0,0,113767,50.0000
1,1,"Rosenbaum, Miss. Edith Louise",female,33,0,0,PC 17613,27.7208
0,1,"Rosenshine, Mr. George (""Mr George Thorne"")",male,46,0,0,PC 17585,79.2000
0,1,"Ross, Mr. John Hugo",male,36,0,0,13049,40.1250
1,1,"Rothes, the Countess. of (Lucy Noel Martha Dyer-Edwards)",female,33,0,0,110152,86.5000
0,1,"Rothschild, Mr. Martin",male,55,1,0,PC 17603,59.4000
1,1,"Rothschild, Mrs. Martin (Elizabeth L. Barrett)",female,54,1,0,PC 17603,59.4000
0,1,"Rowe, Mr. Alfred G",male,33,0,0,113790,26.5500
1,1,"Ryerson, Master. John Borie",male,13,2,2,PC 17608,262.3750
1,1,"Ryerson, Miss. Emily Borie",female,18,2,2,PC 17608,262.3750
1,1,"Ryerson, Miss. Susan Parker ""Suzette""",female,21,2,2,PC 17608,262.3750
0,1,"Ryerson, Mr. Arthur Larned",male,61,1,3,PC 17608,262.3750
1,1,"Ryerson, Mrs. Arthur Larned (Emily Maria Borie)",female,48,1,3,PC 17608,262.3750
1,1,"Saalfeld, Mr. Adolphe",male,0,0,0,19988,30.5000
1,1,"Sagesser, Mlle. Emma",female,24,0,0,PC 17477,69.3000
1,1,"Salomon, Mr. Abraham L",male,0,0,0,111163,26.0000
1,1,"Schabert, Mrs. Paul (Emma Mock)",female,35,1,0,13236,57.7500
1,1,"Serepeca, Miss. Augusta",female,30,0,0,113798,31.0000
1,1,"Seward, Mr. Frederic Kimber",male,34,0,0,113794,26.5500
1,1,"Shutes, Miss. Elizabeth W",female,40,0,0,PC 17582,153.4625
1,1,"Silverthorne, Mr. Spencer Victor",male,35,0,0,PC 17475,26.2875
0,1,"Silvey, Mr. William Baird",male,50,1,0,13507,55.9000
1,1,"Silvey, Mrs. William Baird (Alice Munger)",female,39,1,0,13507,55.9000
1,1,"Simonius-Blumer, Col. Oberst Alfons",male,56,0,0,13213,35.5000
1,1,"Sloper, Mr. William Thompson",male,28,0,0,113788,35.5000
0,1,"Smart, Mr. John Montgomery",male,56,0,0,113792,26.5500
0,1,"Smith, Mr. James Clinch",male,56,0,0,17764,30.6958
0,1,"Smith, Mr. Lucien Philip",male,24,1,0,13695,60.0000
0,1,"Smith, Mr. Richard William",male,0,0,0,113056,26.0000
1,1,"Smith, Mrs. Lucien Philip (Mary Eloise Hughes)",female,18,1,0,13695,60.0000
1,1,"Snyder, Mr. John Pillsbury",male,24,1,0,21228,82.2667
1,1,"Snyder, Mrs. John Pillsbury (Nelle Stevenson)",female,23,1,0,21228,82.2667
1,1,"Spedden, Master. Robert Douglas",male,6,0,2,16966,134.5000
1,1,"Spedden, Mr. Frederic Oakley",male,45,1,1,16966,134.5000
1,1,"Spedden, Mrs. Frederic Oakley (Margaretta Corning Stone)",female,40,1,1,16966,134.5000
0,1,"Spencer, Mr. William Augustus",male,57,1,0,PC 17569,146.5208
1,1,"Spencer, Mrs. William Augustus (Marie Eugenie)",female,0,1,0,PC 17569,146.5208
1,1,"Stahelin-Maeglin, Dr. Max",male,32,0,0,13214,30.5000
0,1,"Stead, Mr. William Thomas",male,62,0,0,113514,26.5500
1,1,"Stengel, Mr. Charles Emil Henry",male,54,1,0,11778,55.4417
1,1,"Stengel, Mrs. Charles Emil Henry (Annie May Morris)",female,43,1,0,11778,55.4417
1,1,"Stephenson, Mrs. Walter Bertram (Martha Eustis)",female,52,1,0,36947,78.2667
0,1,"Stewart, Mr. Albert A",male,0,0,0,PC 17605,27.7208
1,1,"Stone, Mrs. George Nelson (Martha Evelyn)",female,62,0,0,113572,80.0000
0,1,"Straus, Mr. Isidor",male,67,1,0,PC 17483,221.7792
0,1,"Straus, Mrs. Isidor (Rosalie Ida Blun)",female,63,1,0,PC 17483,221.7792
0,1,"Sutton, Mr. Frederick",male,61,0,0,36963,32.3208
1,1,"Swift, Mrs. Frederick Joel (Margaret Welles Barron)",female,48,0,0,17466,25.9292
1,1,"Taussig, Miss. Ruth",female,18,0,2,110413,79.6500
0,1,"Taussig, Mr. Emil",male,52,1,1,110413,79.6500
1,1,"Taussig, Mrs. Emil (Tillie Mandelbaum)",female,39,1,1,110413,79.6500
1,1,"Taylor, Mr. Elmer Zebley",male,48,1,0,19996,52.0000
1,1,"Taylor, Mrs. Elmer Zebley (Juliet Cummins Wright)",female,0,1,0,19996,52.0000
0,1,"Thayer, Mr. John Borland",male,49,1,1,17421,110.8833
1,1,"Thayer, Mr. John Borland Jr",male,17,0,2,17421,110.8833
1,1,"Thayer, Mrs. John Borland (Marian Longstreth Morris)",female,39,1,1,17421,110.8833
1,1,"Thorne, Mrs. Gertrude Maybelle",female,0,0,0,PC 17585,79.2000
1,1,"Tucker, Mr. Gilbert Milligan Jr",male,31,0,0,2543,28.5375
0,1,"Uruchurtu, Don. Manuel E",male,40,0,0,PC 17601,27.7208
0,1,"Van der hoef, Mr. Wyckoff",male,61,0,0,111240,33.5000
0,1,"Walker, Mr. William Anderson",male,47,0,0,36967,34.0208
1,1,"Ward, Miss. Anna",female,35,0,0,PC 17755,512.3292
0,1,"Warren, Mr. Frank Manley",male,64,1,0,110813,75.2500
1,1,"Warren, Mrs. Frank Manley (Anna Sophia Atkinson)",female,60,1,0,110813,75.2500
0,1,"Weir, Col. John",male,60,0,0,113800,26.5500
0,1,"White, Mr. Percival Wayland",male,54,0,1,35281,77.2875
0,1,"White, Mr. Richard Frasar",male,21,0,1,35281,77.2875
1,1,"White, Mrs. John Stuart (Ella Holmes)",female,55,0,0,PC 17760,135.6333
1,1,"Wick, Miss. Mary Natalie",female,31,0,2,36928,164.8667
0,1,"Wick, Mr. George Dennick",male,57,1,1,36928,164.8667
1,1,"Wick, Mrs. George Dennick (Mary Hitchcock)",female,45,1,1,36928,164.8667
0,1,"Widener, Mr. George Dunton",male,50,1,1,113503,211.5000
0,1,"Widener, Mr. Harry Elkins",male,27,0,2,113503,211.5000
1,1,"Widener, Mrs. George Dunton (Eleanor Elkins)",female,50,1,1,113503,211.5000
1,1,"Willard, Miss. Constance",female,21,0,0,113795,26.5500
0,1,"Williams, Mr. Charles Duane",male,51,0,1,PC 17597,61.3792
1,1,"Williams, Mr. Richard Norris II",male,21,0,1,PC 17597,61.3792
0,1,"Williams-Lambert, Mr. Fletcher Fellows",male,0,0,0,113510,35.0000
1,1,"Wilson, Miss. Helen Alice",female,31,0,0,16966,134.5000
1,1,"Woolner, Mr. Hugh",male,0,0,0,19947,35.5000
0,1,"Wright, Mr. George",male,62,0,0,113807,26.5500
1,1,"Young, Miss. Marie Grice",female,36,0,0,PC 17760,135.6333
0,2,"Abelson, Mr. Samuel",male,30,1,0,P/PP 3381,24.0000
1,2,"Abelson, Mrs. Samuel (Hannah Wizosky)",female,28,1,0,P/PP 3381,24.0000
0,2,"Aldworth, Mr. Charles Augustus",male,30,0,0,248744,13.0000
0,2,"Andrew, Mr. Edgardo Samuel",male,18,0,0,231945,11.5000
0,2,"Andrew, Mr. Frank Thomas",male,25,0,0,C.A. 34050,10.5000
0,2,"Angle, Mr. William A",male,34,1,0,226875,26.0000
1,2,"Angle, Mrs. William A (Florence ""Mary"" Agnes Hughes)",female,36,1,0,226875,26.0000
0,2,"Ashby, Mr. John",male,57,0,0,244346,13.0000
0,2,"Bailey, Mr. Percy Andrew",male,18,0,0,29108,11.5000
0,2,"Baimbrigge, Mr. Charles Robert",male,23,0,0,C.A. 31030,10.5000
1,2,"Ball, Mrs. (Ada E Hall)",female,36,0,0,28551,13.0000
0,2,"Banfield, Mr. Frederick James",male,28,0,0,C.A./SOTON 34068,10.5000
0,2,"Bateman, Rev. Robert James",male,51,0,0,S.O.P. 1166,12.5250
1,2,"Beane, Mr. Edward",male,32,1,0,2908,26.0000
1,2,"Beane, Mrs. Edward (Ethel Clarke)",female,19,1,0,2908,26.0000
0,2,"Beauchamp, Mr. Henry James",male,28,0,0,244358,26.0000
1,2,"Becker, Master. Richard F",male,1,2,1,230136,39.0000
1,2,"Becker, Miss. Marion Louise",female,4,2,1,230136,39.0000
1,2,"Becker, Miss. Ruth Elizabeth",female,12,2,1,230136,39.0000
1,2,"Becker, Mrs. Allen Oliver (Nellie E Baumgardner)",female,36,0,3,230136,39.0000
1,2,"Beesley, Mr. Lawrence",male,34,0,0,248698,13.0000
1,2,"Bentham, Miss. Lilian W",female,19,0,0,28404,13.0000
0,2,"Berriman, Mr. William John",male,23,0,0,28425,13.0000
0,2,"Botsford, Mr. William Hull",male,26,0,0,237670,13.0000
0,2,"Bowenur, Mr. Solomon",male,42,0,0,211535,13.0000
0,2,"Bracken, Mr. James H",male,27,0,0,220367,13.0000
1,2,"Brown, Miss. Amelia ""Mildred""",female,24,0,0,248733,13.0000
1,2,"Brown, Miss. Edith Eileen",female,15,0,2,29750,39.0000
0,2,"Brown, Mr. Thomas William Solomon",male,60,1,1,29750,39.0000
1,2,"Brown, Mrs. Thomas William Solomon (Elizabeth Catherine Ford)",female,40,1,1,29750,39.0000
1,2,"Bryhl, Miss. Dagmar Jenny Ingeborg ",female,20,1,0,236853,26.0000
0,2,"Bryhl, Mr. Kurt Arnold Gottfrid",male,25,1,0,236853,26.0000
1,2,"Buss, Miss. Kate",female,36,0,0,27849,13.0000
0,2,"Butler, Mr. Reginald Fenton",male,25,0,0,234686,13.0000
0,2,"Byles, Rev. Thomas Roussel Davids",male,42,0,0,244310,13.0000
1,2,"Bystrom, Mrs. (Karolina)",female,42,0,0,236852,13.0000
1,2,"Caldwell, Master. Alden Gates",male,0.8333,0,2,248738,29.0000
1,2,"Caldwell, Mr. Albert Francis",male,26,1,1,248738,29.0000
1,2,"Caldwell, Mrs. Albert Francis (Sylvia Mae Harbaugh)",female,22,1,1,248738,29.0000
1,2,"Cameron, Miss. Clear Annie",female,35,0,0,F.C.C. 13528,21.0000
0,2,"Campbell, Mr. William",male,0,0,0,239853,0.0000
0,2,"Carbines, Mr. William",male,19,0,0,28424,13.0000
0,2,"Carter, Mrs. Ernest Courtenay (Lilian Hughes)",female,44,1,0,244252,26.0000
0,2,"Carter, Rev. Ernest Courtenay",male,54,1,0,244252,26.0000
0,2,"Chapman, Mr. Charles Henry",male,52,0,0,248731,13.5000
0,2,"Chapman, Mr. John Henry",male,37,1,0,SC/AH 29037,26.0000
0,2,"Chapman, Mrs. John Henry (Sara Elizabeth Lawry)",female,29,1,0,SC/AH 29037,26.0000
1,2,"Christy, Miss. Julie Rachel",female,25,1,1,237789,30.0000
1,2,"Christy, Mrs. (Alice Frances)",female,45,0,2,237789,30.0000
0,2,"Clarke, Mr. Charles Valentine",male,29,1,0,2003,26.0000
1,2,"Clarke, Mrs. Charles V (Ada Maria Winfield)",female,28,1,0,2003,26.0000
0,2,"Coleridge, Mr. Reginald Charles",male,29,0,0,W./C. 14263,10.5000
0,2,"Collander, Mr. Erik Gustaf",male,28,0,0,248740,13.0000
1,2,"Collett, Mr. Sidney C Stuart",male,24,0,0,28034,10.5000
1,2,"Collyer, Miss. Marjorie ""Lottie""",female,8,0,2,C.A. 31921,26.2500
0,2,"Collyer, Mr. Harvey",male,31,1,1,C.A. 31921,26.2500
1,2,"Collyer, Mrs. Harvey (Charlotte Annie Tate)",female,31,1,1,C.A. 31921,26.2500
1,2,"Cook, Mrs. (Selena Rogers)",female,22,0,0,W./C. 14266,10.5000
0,2,"Corbett, Mrs. Walter H (Irene Colvin)",female,30,0,0,237249,13.0000
0,2,"Corey, Mrs. Percy C (Mary Phyllis Elizabeth Miller)",female,0,0,0,F.C.C. 13534,21.0000
0,2,"Cotterill, Mr. Henry ""Harry""",male,21,0,0,29107,11.5000
0,2,"Cunningham, Mr. Alfred Fleming",male,0,0,0,239853,0.0000
1,2,"Davies, Master. John Morgan Jr",male,8,1,1,C.A. 33112,36.7500
0,2,"Davies, Mr. Charles Henry",male,18,0,0,S.O.C. 14879,73.5000
1,2,"Davies, Mrs. John Morgan (Elizabeth Agnes Mary White) ",female,48,0,2,C.A. 33112,36.7500
1,2,"Davis, Miss. Mary",female,28,0,0,237668,13.0000
0,2,"de Brito, Mr. Jose Joaquim",male,32,0,0,244360,13.0000
0,2,"Deacon, Mr. Percy William",male,17,0,0,S.O.C. 14879,73.5000
0,2,"del Carlo, Mr. Sebastiano",male,29,1,0,SC/PARIS 2167,27.7208
1,2,"del Carlo, Mrs. Sebastiano (Argenia Genovesi)",female,24,1,0,SC/PARIS 2167,27.7208
0,2,"Denbury, Mr. Herbert",male,25,0,0,C.A. 31029,31.5000
0,2,"Dibden, Mr. William",male,18,0,0,S.O.C. 14879,73.5000
1,2,"Doling, Miss. Elsie",female,18,0,1,231919,23.0000
1,2,"Doling, Mrs. John T (Ada Julia Bone)",female,34,0,1,231919,23.0000
0,2,"Downton, Mr. William James",male,54,0,0,28403,26.0000
1,2,"Drew, Master. Marshall Brines",male,8,0,2,28220,32.5000
0,2,"Drew, Mr. James Vivian",male,42,1,1,28220,32.5000
1,2,"Drew, Mrs. James Vivian (Lulu Thorne Christian)",female,34,1,1,28220,32.5000
1,2,"Duran y More, Miss. Asuncion",female,27,1,0,SC/PARIS 2149,13.8583
1,2,"Duran y More, Miss. Florentina",female,30,1,0,SC/PARIS 2148,13.8583
0,2,"Eitemiller, Mr. George Floyd",male,23,0,0,29751,13.0000
0,2,"Enander, Mr. Ingvar",male,21,0,0,236854,13.0000
0,2,"Fahlstrom, Mr. Arne Jonas",male,18,0,0,236171,13.0000
0,2,"Faunthorpe, Mr. Harry",male,40,1,0,2926,26.0000
1,2,"Faunthorpe, Mrs. Lizzie (Elizabeth Anne Wilkinson)",female,29,1,0,2926,26.0000
0,2,"Fillbrook, Mr. Joseph Charles",male,18,0,0,C.A. 15185,10.5000
0,2,"Fox, Mr. Stanley Hubert",male,36,0,0,229236,13.0000
0,2,"Frost, Mr. Anthony Wood ""Archie""",male,0,0,0,239854,0.0000
0,2,"Funk, Miss. Annie Clemmer",female,38,0,0,237671,13.0000
0,2,"Fynney, Mr. Joseph J",male,35,0,0,239865,26.0000
0,2,"Gale, Mr. Harry",male,38,1,0,28664,21.0000
0,2,"Gale, Mr. Shadrach",male,34,1,0,28664,21.0000
1,2,"Garside, Miss. Ethel",female,34,0,0,243880,13.0000
0,2,"Gaskell, Mr. Alfred",male,16,0,0,239865,26.0000
0,2,"Gavey, Mr. Lawrence",male,26,0,0,31028,10.5000
0,2,"Gilbert, Mr. William",male,47,0,0,C.A. 30769,10.5000
0,2,"Giles, Mr. Edgar",male,21,1,0,28133,11.5000
0,2,"Giles, Mr. Frederick Edward",male,21,1,0,28134,11.5000
0,2,"Giles, Mr. Ralph",male,24,0,0,248726,13.5000
0,2,"Gill, Mr. John William",male,24,0,0,233866,13.0000
0,2,"Gillespie, Mr. William Henry",male,34,0,0,12233,13.0000
0,2,"Givard, Mr. Hans Kristensen",male,30,0,0,250646,13.0000
0,2,"Greenberg, Mr. Samuel",male,52,0,0,250647,13.0000
0,2,"Hale, Mr. Reginald",male,30,0,0,250653,13.0000
1,2,"Hamalainen, Master. Viljo",male,0.6667,1,1,250649,14.5000
1,2,"Hamalainen, Mrs. William (Anna)",female,24,0,2,250649,14.5000
0,2,"Harbeck, Mr. William H",male,44,0,0,248746,13.0000
1,2,"Harper, Miss. Annie Jessie ""Nina""",female,6,0,1,248727,33.0000
0,2,"Harper, Rev. John",male,28,0,1,248727,33.0000
1,2,"Harris, Mr. George",male,62,0,0,S.W./PP 752,10.5000
0,2,"Harris, Mr. Walter",male,30,0,0,W/C 14208,10.5000
1,2,"Hart, Miss. Eva Miriam",female,7,0,2,F.C.C. 13529,26.2500
0,2,"Hart, Mr. Benjamin",male,43,1,1,F.C.C. 13529,26.2500
1,2,"Hart, Mrs. Benjamin (Esther Ada Bloomfield)",female,45,1,1,F.C.C. 13529,26.2500
1,2,"Herman, Miss. Alice",female,24,1,2,220845,65.0000
1,2,"Herman, Miss. Kate",female,24,1,2,220845,65.0000
0,2,"Herman, Mr. Samuel",male,49,1,2,220845,65.0000
1,2,"Herman, Mrs. Samuel (Jane Laver)",female,48,1,2,220845,65.0000
1,2,"Hewlett, Mrs. (Mary D Kingcome) ",female,55,0,0,248706,16.0000
0,2,"Hickman, Mr. Leonard Mark",male,24,2,0,S.O.C. 14879,73.5000
0,2,"Hickman, Mr. Lewis",male,32,2,0,S.O.C. 14879,73.5000
0,2,"Hickman, Mr. Stanley George",male,21,2,0,S.O.C. 14879,73.5000
0,2,"Hiltunen, Miss. Marta",female,18,1,1,250650,13.0000
1,2,"Hocking, Miss. Ellen ""Nellie""",female,20,2,1,29105,23.0000
0,2,"Hocking, Mr. Richard George",male,23,2,1,29104,11.5000
0,2,"Hocking, Mr. Samuel James Metcalfe",male,36,0,0,242963,13.0000
1,2,"Hocking, Mrs. Elizabeth (Eliza Needs)",female,54,1,3,29105,23.0000
0,2,"Hodges, Mr. Henry Price",male,50,0,0,250643,13.0000
0,2,"Hold, Mr. Stephen",male,44,1,0,26707,26.0000
1,2,"Hold, Mrs. Stephen (Annie Margaret Hill)",female,29,1,0,26707,26.0000
0,2,"Hood, Mr. Ambrose Jr",male,21,0,0,S.O.C. 14879,73.5000
1,2,"Hosono, Mr. Masabumi",male,42,0,0,237798,13.0000
0,2,"Howard, Mr. Benjamin",male,63,1,0,24065,26.0000
0,2,"Howard, Mrs. Benjamin (Ellen Truelove Arman)",female,60,1,0,24065,26.0000
0,2,"Hunt, Mr. George Henry",male,33,0,0,SCO/W 1585,12.2750
1,2,"Ilett, Miss. Bertha",female,17,0,0,SO/C 14885,10.5000
0,2,"Jacobsohn, Mr. Sidney Samuel",male,42,1,0,243847,27.0000
1,2,"Jacobsohn, Mrs. Sidney Samuel (Amy Frances Christy)",female,24,2,1,243847,27.0000
0,2,"Jarvis, Mr. John Denzil",male,47,0,0,237565,15.0000
0,2,"Jefferys, Mr. Clifford Thomas",male,24,2,0,C.A. 31029,31.5000
0,2,"Jefferys, Mr. Ernest Wilfred",male,22,2,0,C.A. 31029,31.5000
0,2,"Jenkin, Mr. Stephen Curnow",male,32,0,0,C.A. 33111,10.5000
1,2,"Jerwan, Mrs. Amin S (Marie Marthe Thuillard)",female,23,0,0,SC/AH Basle 541,13.7917
0,2,"Kantor, Mr. Sinai",male,34,1,0,244367,26.0000
1,2,"Kantor, Mrs. Sinai (Miriam Sternin)",female,24,1,0,244367,26.0000
0,2,"Karnes, Mrs. J Frank (Claire Bennett)",female,22,0,0,F.C.C. 13534,21.0000
1,2,"Keane, Miss. Nora A",female,0,0,0,226593,12.3500
0,2,"Keane, Mr. Daniel",male,35,0,0,233734,12.3500
1,2,"Kelly, Mrs. Florence ""Fannie""",female,45,0,0,223596,13.5000
0,2,"Kirkland, Rev. Charles Leonard",male,57,0,0,219533,12.3500
0,2,"Knight, Mr. Robert J",male,0,0,0,239855,0.0000
0,2,"Kvillner, Mr. Johan Henrik Johannesson",male,31,0,0,C.A. 18723,10.5000
0,2,"Lahtinen, Mrs. William (Anna Sylfven)",female,26,1,1,250651,26.0000
0,2,"Lahtinen, Rev. William",male,30,1,1,250651,26.0000
0,2,"Lamb, Mr. John Joseph",male,0,0,0,240261,10.7083
1,2,"Laroche, Miss. Louise",female,1,1,2,SC/Paris 2123,41.5792
1,2,"Laroche, Miss. Simonne Marie Anne Andree",female,3,1,2,SC/Paris 2123,41.5792
0,2,"Laroche, Mr. Joseph Philippe Lemercier",male,25,1,2,SC/Paris 2123,41.5792
1,2,"Laroche, Mrs. Joseph (Juliette Marie Louise Lafargue)",female,22,1,2,SC/Paris 2123,41.5792
1,2,"Lehmann, Miss. Bertha",female,17,0,0,SC 1748,12.0000
1,2,"Leitch, Miss. Jessie Wills",female,0,0,0,248727,33.0000
1,2,"Lemore, Mrs. (Amelia Milley)",female,34,0,0,C.A. 34260,10.5000
0,2,"Levy, Mr. Rene Jacques",male,36,0,0,SC/Paris 2163,12.8750
0,2,"Leyson, Mr. Robert William Norman",male,24,0,0,C.A. 29566,10.5000
0,2,"Lingane, Mr. John",male,61,0,0,235509,12.3500
0,2,"Louch, Mr. Charles Alexander",male,50,1,0,SC/AH 3085,26.0000
1,2,"Louch, Mrs. Charles Alexander (Alice Adelaide Slow)",female,42,1,0,SC/AH 3085,26.0000
0,2,"Mack, Mrs. (Mary)",female,57,0,0,S.O./P.P. 3,10.5000
0,2,"Malachard, Mr. Noel",male,0,0,0,237735,15.0458
1,2,"Mallet, Master. Andre",male,1,0,2,S.C./PARIS 2079,37.0042
0,2,"Mallet, Mr. Albert",male,31,1,1,S.C./PARIS 2079,37.0042
1,2,"Mallet, Mrs. Albert (Antoinette Magnin)",female,24,1,1,S.C./PARIS 2079,37.0042
0,2,"Mangiavacchi, Mr. Serafino Emilio",male,0,0,0,SC/A.3 2861,15.5792
0,2,"Matthews, Mr. William John",male,30,0,0,28228,13.0000
0,2,"Maybery, Mr. Frank Hubert",male,40,0,0,239059,16.0000
0,2,"McCrae, Mr. Arthur Gordon",male,32,0,0,237216,13.5000
0,2,"McCrie, Mr. James Matthew",male,30,0,0,233478,13.0000
0,2,"McKane, Mr. Peter David",male,46,0,0,28403,26.0000
1,2,"Mellinger, Miss. Madeleine Violet",female,13,0,1,250644,19.5000
1,2,"Mellinger, Mrs. (Elizabeth Anne Maidment)",female,41,0,1,250644,19.5000
1,2,"Mellors, Mr. William John",male,19,0,0,SW/PP 751,10.5000
0,2,"Meyer, Mr. August",male,39,0,0,248723,13.0000
0,2,"Milling, Mr. Jacob Christian",male,48,0,0,234360,13.0000
0,2,"Mitchell, Mr. Henry Michael",male,70,0,0,C.A. 24580,10.5000
0,2,"Montvila, Rev. Juozas",male,27,0,0,211536,13.0000
0,2,"Moraweck, Dr. Ernest",male,54,0,0,29011,14.0000
0,2,"Morley, Mr. Henry Samuel (""Mr Henry Marshall"")",male,39,0,0,250655,26.0000
0,2,"Mudd, Mr. Thomas Charles",male,16,0,0,S.O./P.P. 3,10.5000
0,2,"Myles, Mr. Thomas Francis",male,62,0,0,240276,9.6875
0,2,"Nasser, Mr. Nicholas",male,32.5,1,0,237736,30.0708
1,2,"Nasser, Mrs. Nicholas (Adele Achem)",female,14,1,0,237736,30.0708
1,2,"Navratil, Master. Edmond Roger",male,2,1,1,230080,26.0000
1,2,"Navratil, Master. Michel M",male,3,1,1,230080,26.0000
0,2,"Navratil, Mr. Michel (""Louis M Hoffman"")",male,36.5,0,2,230080,26.0000
0,2,"Nesson, Mr. Israel",male,26,0,0,244368,13.0000
0,2,"Nicholls, Mr. Joseph Charles",male,19,1,1,C.A. 33112,36.7500
0,2,"Norman, Mr. Robert Douglas",male,28,0,0,218629,13.5000
1,2,"Nourney, Mr. Alfred (""Baron von Drachstedt"")",male,20,0,0,SC/PARIS 2166,13.8625
1,2,"Nye, Mrs. (Elizabeth Ramell)",female,29,0,0,C.A. 29395,10.5000
0,2,"Otter, Mr. Richard",male,39,0,0,28213,13.0000
1,2,"Oxenham, Mr. Percy Thomas",male,22,0,0,W./C. 14260,10.5000
1,2,"Padro y Manent, Mr. Julian",male,0,0,0,SC/PARIS 2146,13.8625
0,2,"Pain, Dr. Alfred",male,23,0,0,244278,10.5000
1,2,"Pallas y Castello, Mr. Emilio",male,29,0,0,SC/PARIS 2147,13.8583
0,2,"Parker, Mr. Clifford Richard",male,28,0,0,SC 14888,10.5000
0,2,"Parkes, Mr. Francis ""Frank""",male,0,0,0,239853,0.0000
1,2,"Parrish, Mrs. (Lutie Davis)",female,50,0,1,230433,26.0000
0,2,"Pengelly, Mr. Frederick William",male,19,0,0,28665,10.5000
0,2,"Pernot, Mr. Rene",male,0,0,0,SC/PARIS 2131,15.0500
0,2,"Peruschitz, Rev. Joseph Maria",male,41,0,0,237393,13.0000
1,2,"Phillips, Miss. Alice Frances Louisa",female,21,0,1,S.O./P.P. 2,21.0000
1,2,"Phillips, Miss. Kate Florence (""Mrs Kate Louise Phillips Marshall"")",female,19,0,0,250655,26.0000
0,2,"Phillips, Mr. Escott Robert",male,43,0,1,S.O./P.P. 2,21.0000
1,2,"Pinsky, Mrs. (Rosa)",female,32,0,0,234604,13.0000
0,2,"Ponesell, Mr. Martin",male,34,0,0,250647,13.0000
1,2,"Portaluppi, Mr. Emilio Ilario Giuseppe",male,30,0,0,C.A. 34644,12.7375
0,2,"Pulbaum, Mr. Franz",male,27,0,0,SC/PARIS 2168,15.0333
1,2,"Quick, Miss. Phyllis May",female,2,1,1,26360,26.0000
1,2,"Quick, Miss. Winifred Vera",female,8,1,1,26360,26.0000
1,2,"Quick, Mrs. Frederick Charles (Jane Richards)",female,33,0,2,26360,26.0000
0,2,"Reeves, Mr. David",male,36,0,0,C.A. 17248,10.5000
0,2,"Renouf, Mr. Peter Henry",male,34,1,0,31027,21.0000
1,2,"Renouf, Mrs. Peter Henry (Lillian Jefferys)",female,30,3,0,31027,21.0000
1,2,"Reynaldo, Ms. Encarnacion",female,28,0,0,230434,13.0000
0,2,"Richard, Mr. Em
Download .txt
gitextract_0dylqyzr/

├── Chapter02/
│   ├── Python 2.7/
│   │   ├── computation_model.py
│   │   ├── data_model.py
│   │   ├── feeding_parameters.py
│   │   ├── fetching_parameters_1.py
│   │   ├── programming_model.py
│   │   ├── single_neuron_model_1.py
│   │   ├── tensor_flow_counter_1.py
│   │   └── tensor_with_numpy_1.py
│   └── Python 3.5/
│       ├── computation_model.py
│       ├── data_model.py
│       ├── feeding_parameters.py
│       ├── fetching_parameters_1.py
│       ├── programming_model.py
│       ├── single_neuron_model_1.py
│       ├── tensor_flow_counter_1.py
│       └── tensor_with_numpy_1.py
├── Chapter03/
│   ├── Python 2.7/
│   │   ├── five_layers_relu_1.py
│   │   ├── five_layers_relu_dropout_1.py
│   │   ├── five_layers_sigmoid_1.py
│   │   ├── softmax_classifier_1.py
│   │   ├── softmax_model_loader_1.py
│   │   └── softmax_model_saver_1.py
│   └── Python 3.5/
│       ├── five_layers_relu_1.py
│       ├── five_layers_relu_dropout_1.py
│       ├── five_layers_sigmoid_1.py
│       ├── softmax_classifier_1.py
│       ├── softmax_model_loader_1.py
│       └── softmax_model_saver_1.py
├── Chapter04/
│   ├── EMOTION_CNN/
│   │   ├── EmotionDetector/
│   │   │   ├── test.csv
│   │   │   └── train.csv
│   │   ├── Python 2.7/
│   │   │   ├── EmotionDetectorUtils.py
│   │   │   ├── EmotionDetector_1.py
│   │   │   └── test_your_image.py
│   │   └── Python 3.5/
│   │       ├── EmotionDetectorUtils.py
│   │       ├── EmotionDetector_1.py
│   │       ├── __init__.py
│   │       └── test_your_image.py
│   └── MNIST_CNN/
│       ├── Python 2.7/
│       │   └── mnist_cnn_1.py
│       └── Python 3.5/
│           └── mnist_cnn_1.py
├── Chapter05/
│   ├── Python 2.7/
│   │   ├── Convlutional_AutoEncoder.py
│   │   ├── autoencoder_1.py
│   │   ├── deconvolutional_autoencoder_1.py
│   │   └── denoising_autoencoder_1.py
│   └── Python 3.5/
│       ├── Convlutional_AutoEncoder.py
│       ├── __init__.py
│       ├── autoencoder_1.py
│       ├── deconvolutional_autoencoder_1.py
│       └── denoising_autoencoder_1.py
├── Chapter06/
│   ├── Python 2.7/
│   │   ├── LSTM_model_1.py
│   │   ├── __init__.py
│   │   └── bidirectional_RNN_1.py
│   └── Python 3.5/
│       ├── LSTM_model_1.py
│       ├── __init__.py
│       └── bidirectional_RNN_1.py
├── Chapter07/
│   ├── Python 2.7/
│   │   ├── gpu_computing_with_multiple_GPU.py
│   │   ├── gpu_example.py
│   │   └── gpu_soft_placemnet_1.py
│   └── Python 3.5/
│       ├── gpu_computing_with_multiple_GPU.py
│       ├── gpu_example.py
│       └── gpu_soft_placemnet_1.py
├── Chapter08/
│   ├── Python 2.7/
│   │   ├── digit_classifier.py
│   │   ├── keras_movie_classifier_1.py
│   │   ├── keras_movie_classifier_using_convLayer_1.py
│   │   ├── pretty_tensor_digit_1.py
│   │   └── tflearn_titanic_classifier.py
│   ├── Python 3.5/
│   │   ├── __init__.py
│   │   ├── digit_classifier.py
│   │   ├── keras_movie_classifier_1.py
│   │   ├── keras_movie_classifier_using_convLayer_1.py
│   │   ├── pretty_tensor_digit_1.py
│   │   └── tflearn_titanic_classifier.py
│   └── data/
│       └── titanic_dataset.csv
├── Chapter09/
│   ├── Python 2.7/
│   │   └── classify_image.py
│   └── Python 3.5/
│       └── classify_image.py
├── Chapter10/
│   ├── Python 2.7/
│   │   ├── FrozenLake_1.py
│   │   └── Q_Learning_1.py
│   └── Python 3.5/
│       ├── FrozenLake_1.py
│       └── Q_Learning_1.py
├── LICENSE
└── README.md
Download .txt
SYMBOL INDEX (66 symbols across 30 files)

FILE: Chapter04/EMOTION_CNN/Python 2.7/EmotionDetectorUtils.py
  class testResult (line 19) | class testResult:
    method __init__ (line 21) | def __init__(self):
    method evaluate (line 30) | def evaluate(self,label):
    method display_result (line 47) | def display_result(self,evaluations):
  function read_data (line 57) | def read_data(data_dir, force=False):

FILE: Chapter04/EMOTION_CNN/Python 2.7/EmotionDetector_1.py
  function add_to_regularization_loss (line 29) | def add_to_regularization_loss(W, b):
  function weight_variable (line 33) | def weight_variable(shape, stddev=0.02, name=None):
  function bias_variable (line 41) | def bias_variable(shape, name=None):
  function conv2d_basic (line 48) | def conv2d_basic(x, W, bias):
  function max_pool_2x2 (line 52) | def max_pool_2x2(x):
  function emotion_cnn (line 57) | def emotion_cnn(dataset):
  function loss (line 118) | def loss(pred, label):
  function train (line 126) | def train(loss, step):
  function get_next_batch (line 130) | def get_next_batch(images, labels, step):
  function main (line 137) | def main(argv=None):

FILE: Chapter04/EMOTION_CNN/Python 2.7/test_your_image.py
  function rgb2gray (line 18) | def rgb2gray(rgb):

FILE: Chapter04/EMOTION_CNN/Python 3.5/EmotionDetectorUtils.py
  class testResult (line 19) | class testResult:
    method __init__ (line 21) | def __init__(self):
    method evaluate (line 30) | def evaluate(self,label):
    method display_result (line 47) | def display_result(self,evaluations):
  function read_data (line 57) | def read_data(data_dir, force=False):

FILE: Chapter04/EMOTION_CNN/Python 3.5/EmotionDetector_1.py
  function add_to_regularization_loss (line 29) | def add_to_regularization_loss(W, b):
  function weight_variable (line 33) | def weight_variable(shape, stddev=0.02, name=None):
  function bias_variable (line 41) | def bias_variable(shape, name=None):
  function conv2d_basic (line 48) | def conv2d_basic(x, W, bias):
  function max_pool_2x2 (line 52) | def max_pool_2x2(x):
  function emotion_cnn (line 57) | def emotion_cnn(dataset):
  function loss (line 118) | def loss(pred, label):
  function train (line 126) | def train(loss, step):
  function get_next_batch (line 130) | def get_next_batch(images, labels, step):
  function main (line 137) | def main(argv=None):

FILE: Chapter04/EMOTION_CNN/Python 3.5/test_your_image.py
  function rgb2gray (line 18) | def rgb2gray(rgb):

FILE: Chapter04/MNIST_CNN/Python 2.7/mnist_cnn_1.py
  function init_weights (line 10) | def init_weights(shape):
  function model (line 14) | def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):

FILE: Chapter04/MNIST_CNN/Python 3.5/mnist_cnn_1.py
  function init_weights (line 10) | def init_weights(shape):
  function model (line 14) | def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):

FILE: Chapter05/Python 2.7/Convlutional_AutoEncoder.py
  function cae (line 52) | def cae(_X, _W, _b, _keepprob):

FILE: Chapter05/Python 2.7/deconvolutional_autoencoder_1.py
  function plotresult (line 7) | def plotresult(org_vec,noisy_vec,out_vec):

FILE: Chapter05/Python 2.7/denoising_autoencoder_1.py
  function plotresult (line 7) | def plotresult(org_vec,noisy_vec,out_vec):

FILE: Chapter05/Python 3.5/Convlutional_AutoEncoder.py
  function cae (line 52) | def cae(_X, _W, _b, _keepprob):

FILE: Chapter05/Python 3.5/deconvolutional_autoencoder_1.py
  function plotresult (line 7) | def plotresult(org_vec,noisy_vec,out_vec):

FILE: Chapter05/Python 3.5/denoising_autoencoder_1.py
  function plotresult (line 7) | def plotresult(org_vec,noisy_vec,out_vec):

FILE: Chapter06/Python 2.7/LSTM_model_1.py
  function RNN (line 27) | def RNN(x, weights, biases):

FILE: Chapter06/Python 2.7/bidirectional_RNN_1.py
  function BiRNN (line 28) | def BiRNN(x, weights, biases):

FILE: Chapter06/Python 3.5/LSTM_model_1.py
  function RNN (line 27) | def RNN(x, weights, biases):

FILE: Chapter06/Python 3.5/bidirectional_RNN_1.py
  function BiRNN (line 28) | def BiRNN(x, weights, biases):

FILE: Chapter07/Python 2.7/gpu_computing_with_multiple_GPU.py
  function matpow (line 13) | def matpow(M, n):

FILE: Chapter07/Python 2.7/gpu_example.py
  function matpow (line 16) | def matpow(M, n):

FILE: Chapter07/Python 2.7/gpu_soft_placemnet_1.py
  function matpow (line 13) | def matpow(M, n):

FILE: Chapter07/Python 3.5/gpu_computing_with_multiple_GPU.py
  function matpow (line 13) | def matpow(M, n):

FILE: Chapter07/Python 3.5/gpu_example.py
  function matpow (line 12) | def matpow(M, n):

FILE: Chapter07/Python 3.5/gpu_soft_placemnet_1.py
  function matpow (line 13) | def matpow(M, n):

FILE: Chapter08/Python 2.7/digit_classifier.py
  function multilayer_fully_connected (line 15) | def multilayer_fully_connected(images, labels):
  function lenet5 (line 23) | def lenet5(images, labels):
  function main (line 35) | def main(_=None):

FILE: Chapter08/Python 2.7/pretty_tensor_digit_1.py
  function multilayer_fully_connected (line 14) | def multilayer_fully_connected(images, labels):
  function lenet5 (line 20) | def lenet5(images, labels):
  function main (line 26) | def main(_=None):

FILE: Chapter08/Python 2.7/tflearn_titanic_classifier.py
  function preprocess (line 7) | def preprocess(data, columns_to_ignore):

FILE: Chapter08/Python 3.5/digit_classifier.py
  function multilayer_fully_connected (line 20) | def multilayer_fully_connected(images, labels):
  function lenet5 (line 28) | def lenet5(images, labels):
  function main (line 40) | def main(_=None):

FILE: Chapter08/Python 3.5/pretty_tensor_digit_1.py
  function multilayer_fully_connected (line 17) | def multilayer_fully_connected(images, labels):
  function lenet5 (line 23) | def lenet5(images, labels):
  function main (line 29) | def main(_=None):

FILE: Chapter08/Python 3.5/tflearn_titanic_classifier.py
  function preprocess (line 9) | def preprocess(data, columns_to_ignore):
Condensed preview — 80 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (269K chars).
[
  {
    "path": "Chapter02/Python 2.7/computation_model.py",
    "chars": 253,
    "preview": "import tensorflow as tf\nwith tf.Session() as session:\n    x = tf.placeholder(tf.float32,[1],name=\"x\")\n    y = tf.placeho"
  },
  {
    "path": "Chapter02/Python 2.7/data_model.py",
    "chars": 352,
    "preview": "import tensorflow as tf\n\nscalar = tf.constant(100)\nvector = tf.constant([1,2,3,4,5])\nmatrix = tf.constant([[1,2,3],[4,5,"
  },
  {
    "path": "Chapter02/Python 2.7/feeding_parameters.py",
    "chars": 205,
    "preview": "import tensorflow as tf\nimport numpy as np\n\na = 3\nb = 2\n\n\nx = tf.placeholder(tf.float32,shape=(a,b))\ny = tf.add(x,x)\n\nda"
  },
  {
    "path": "Chapter02/Python 2.7/fetching_parameters_1.py",
    "chars": 369,
    "preview": "import tensorflow as tf\n\nconstant_A = tf.constant([100.0])\nconstant_B = tf.constant([300.0])\nconstant_C = tf.constant([3"
  },
  {
    "path": "Chapter02/Python 2.7/programming_model.py",
    "chars": 253,
    "preview": "import tensorflow as tf\nwith tf.Session() as session:\n    x = tf.placeholder(tf.float32,[1],name=\"x\")\n    y = tf.placeho"
  },
  {
    "path": "Chapter02/Python 2.7/single_neuron_model_1.py",
    "chars": 756,
    "preview": "import tensorflow as tf\n\nweight = tf.Variable(1.0,name=\"weight\")\ninput_value = tf.constant(0.5,name=\"input_value\")\nexpec"
  },
  {
    "path": "Chapter02/Python 2.7/tensor_flow_counter_1.py",
    "chars": 420,
    "preview": "import tensorflow as tf\n\nvalue = tf.Variable(0,name=\"value\")\none = tf.constant(1)\nnew_value = tf.add(value,one)\nupdate_v"
  },
  {
    "path": "Chapter02/Python 2.7/tensor_with_numpy_1.py",
    "chars": 1524,
    "preview": "import tensorflow as tf\nimport numpy as np\n\n#tensore 1d con valori costanti\ntensor_1d = np.array([1,2,3,4,5,6,7,8,9,10])"
  },
  {
    "path": "Chapter02/Python 3.5/computation_model.py",
    "chars": 259,
    "preview": "import tensorflow as tf\nwith tf.Session() as session:\n    x = tf.placeholder(tf.float32, [1], name=\"x\")\n    y = tf.place"
  },
  {
    "path": "Chapter02/Python 3.5/data_model.py",
    "chars": 368,
    "preview": "import tensorflow as tf\n\nscalar = tf.constant(100)\nvector = tf.constant([1, 2, 3, 4, 5])\nmatrix = tf.constant([[1, 2, 3]"
  },
  {
    "path": "Chapter02/Python 3.5/feeding_parameters.py",
    "chars": 210,
    "preview": "import tensorflow as tf\nimport numpy as np\n\na = 3\nb = 2\n\nx = tf.placeholder(tf.float32, shape=(a, b))\ny = tf.add(x, x)\n\n"
  },
  {
    "path": "Chapter02/Python 3.5/fetching_parameters_1.py",
    "chars": 372,
    "preview": "import tensorflow as tf\n\nconstant_A = tf.constant([100.0])\nconstant_B = tf.constant([300.0])\nconstant_C = tf.constant([3"
  },
  {
    "path": "Chapter02/Python 3.5/programming_model.py",
    "chars": 261,
    "preview": "import tensorflow as tf\n\nwith tf.Session() as session:\n    x = tf.placeholder(tf.float32, [1], name=\"x\")\n    y = tf.plac"
  },
  {
    "path": "Chapter02/Python 3.5/single_neuron_model_1.py",
    "chars": 770,
    "preview": "import tensorflow as tf\n\nweight = tf.Variable(1.0, name=\"weight\")\ninput_value = tf.constant(0.5, name=\"input_value\")\nexp"
  },
  {
    "path": "Chapter02/Python 3.5/tensor_flow_counter_1.py",
    "chars": 425,
    "preview": "import tensorflow as tf\n\nvalue = tf.Variable(0, name=\"value\")\none = tf.constant(1)\nnew_value = tf.add(value, one)\nupdate"
  },
  {
    "path": "Chapter02/Python 3.5/tensor_with_numpy_1.py",
    "chars": 1545,
    "preview": "import tensorflow as tf\nimport numpy as np\n\n#tensore 1d con valori costanti\ntensor_1d = np.array([1, 2, 3, 4, 5, 6, 7, 8"
  },
  {
    "path": "Chapter03/Python 2.7/five_layers_relu_1.py",
    "chars": 2807,
    "preview": "from tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\nimport math\n\nlogs_path = 'log_simple_"
  },
  {
    "path": "Chapter03/Python 2.7/five_layers_relu_dropout_1.py",
    "chars": 2847,
    "preview": "from tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\nimport math\n\nlogs_path = 'log_simple_"
  },
  {
    "path": "Chapter03/Python 2.7/five_layers_sigmoid_1.py",
    "chars": 2457,
    "preview": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport math\n\nlogs_path = 'log_simple_"
  },
  {
    "path": "Chapter03/Python 2.7/softmax_classifier_1.py",
    "chars": 2205,
    "preview": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\nfrom "
  },
  {
    "path": "Chapter03/Python 2.7/softmax_model_loader_1.py",
    "chars": 766,
    "preview": "import matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nfrom random import randint\nfrom tensorflow.ex"
  },
  {
    "path": "Chapter03/Python 2.7/softmax_model_saver_1.py",
    "chars": 2187,
    "preview": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\nfrom "
  },
  {
    "path": "Chapter03/Python 3.5/five_layers_relu_1.py",
    "chars": 2807,
    "preview": "from tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\nimport math\n\nlogs_path = 'log_simple_"
  },
  {
    "path": "Chapter03/Python 3.5/five_layers_relu_dropout_1.py",
    "chars": 2853,
    "preview": "from tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\nimport math\n\nlogs_path = 'log_simple_"
  },
  {
    "path": "Chapter03/Python 3.5/five_layers_sigmoid_1.py",
    "chars": 2457,
    "preview": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport math\n\nlogs_path = 'log_simple_"
  },
  {
    "path": "Chapter03/Python 3.5/softmax_classifier_1.py",
    "chars": 2205,
    "preview": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\nfrom "
  },
  {
    "path": "Chapter03/Python 3.5/softmax_model_loader_1.py",
    "chars": 766,
    "preview": "import matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nfrom random import randint\nfrom tensorflow.ex"
  },
  {
    "path": "Chapter03/Python 3.5/softmax_model_saver_1.py",
    "chars": 2187,
    "preview": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\nfrom "
  },
  {
    "path": "Chapter04/EMOTION_CNN/Python 2.7/EmotionDetectorUtils.py",
    "chars": 4429,
    "preview": "import pandas as pd\nimport numpy as np\nimport os, sys, inspect\nfrom six.moves import cPickle as pickle\nimport scipy.misc"
  },
  {
    "path": "Chapter04/EMOTION_CNN/Python 2.7/EmotionDetector_1.py",
    "chars": 10247,
    "preview": "import tensorflow as tf\nimport numpy as np\n#import os, sys, inspect\nfrom datetime import datetime\nimport EmotionDetector"
  },
  {
    "path": "Chapter04/EMOTION_CNN/Python 2.7/test_your_image.py",
    "chars": 1963,
    "preview": "from scipy import misc\r\nimport numpy as np\r\nimport matplotlib.cm as cm\r\nimport tensorflow as tf\r\nimport os, sys, inspect"
  },
  {
    "path": "Chapter04/EMOTION_CNN/Python 3.5/EmotionDetectorUtils.py",
    "chars": 4434,
    "preview": "import pandas as pd\nimport numpy as np\nimport os, sys, inspect\nfrom six.moves import cPickle as pickle\nimport scipy.misc"
  },
  {
    "path": "Chapter04/EMOTION_CNN/Python 3.5/EmotionDetector_1.py",
    "chars": 10122,
    "preview": "import tensorflow as tf\nimport numpy as np\n#import os, sys, inspect\nfrom datetime import datetime\nimport EmotionDetector"
  },
  {
    "path": "Chapter04/EMOTION_CNN/Python 3.5/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "Chapter04/EMOTION_CNN/Python 3.5/test_your_image.py",
    "chars": 1963,
    "preview": "from scipy import misc\r\nimport numpy as np\r\nimport matplotlib.cm as cm\r\nimport tensorflow as tf\r\nimport os, sys, inspect"
  },
  {
    "path": "Chapter04/MNIST_CNN/Python 2.7/mnist_cnn_1.py",
    "chars": 6386,
    "preview": "import tensorflow as tf\r\nimport numpy as np\r\n#import mnist_data \r\n\r\nbatch_size = 128\r\ntest_size = 256\r\nimg_size = 28\r\nnu"
  },
  {
    "path": "Chapter04/MNIST_CNN/Python 3.5/mnist_cnn_1.py",
    "chars": 6384,
    "preview": "import tensorflow as tf\r\nimport numpy as np\r\n#import mnist_data \r\n\r\nbatch_size = 128\r\ntest_size = 256\r\nimg_size = 28\r\nnu"
  },
  {
    "path": "Chapter05/Python 2.7/Convlutional_AutoEncoder.py",
    "chars": 4780,
    "preview": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport math\r\nimport tensorflow as tf\r\nimport tensorflow.examples.tu"
  },
  {
    "path": "Chapter05/Python 2.7/autoencoder_1.py",
    "chars": 3896,
    "preview": "import tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# Import MINST data\r\nfrom tensorflow.e"
  },
  {
    "path": "Chapter05/Python 2.7/deconvolutional_autoencoder_1.py",
    "chars": 3967,
    "preview": "import numpy as np\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.examples.tutorials.mnist i"
  },
  {
    "path": "Chapter05/Python 2.7/denoising_autoencoder_1.py",
    "chars": 4970,
    "preview": "import numpy as np\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.examples.tutorials.mnist i"
  },
  {
    "path": "Chapter05/Python 3.5/Convlutional_AutoEncoder.py",
    "chars": 4780,
    "preview": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport math\r\nimport tensorflow as tf\r\nimport tensorflow.examples.tu"
  },
  {
    "path": "Chapter05/Python 3.5/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "Chapter05/Python 3.5/autoencoder_1.py",
    "chars": 3897,
    "preview": "import tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# Import MINST data\r\nfrom tensorflow.e"
  },
  {
    "path": "Chapter05/Python 3.5/deconvolutional_autoencoder_1.py",
    "chars": 3477,
    "preview": "import numpy as np\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.examples.tutorials.mnist i"
  },
  {
    "path": "Chapter05/Python 3.5/denoising_autoencoder_1.py",
    "chars": 4906,
    "preview": "import numpy as np\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.examples.tutorials.mnist i"
  },
  {
    "path": "Chapter06/Python 2.7/LSTM_model_1.py",
    "chars": 2312,
    "preview": "import tensorflow as tf\r\nfrom tensorflow.contrib import rnn\r\n\r\nfrom tensorflow.examples.tutorials.mnist import input_dat"
  },
  {
    "path": "Chapter06/Python 2.7/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "Chapter06/Python 2.7/bidirectional_RNN_1.py",
    "chars": 2709,
    "preview": "import tensorflow as tf\r\nimport numpy as np\r\nfrom tensorflow.contrib import rnn\r\n\r\nfrom tensorflow.examples.tutorials.mn"
  },
  {
    "path": "Chapter06/Python 3.5/LSTM_model_1.py",
    "chars": 2312,
    "preview": "import tensorflow as tf\r\nfrom tensorflow.contrib import rnn\r\n\r\nfrom tensorflow.examples.tutorials.mnist import input_dat"
  },
  {
    "path": "Chapter06/Python 3.5/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "Chapter06/Python 3.5/bidirectional_RNN_1.py",
    "chars": 2709,
    "preview": "import tensorflow as tf\r\nimport numpy as np\r\nfrom tensorflow.contrib import rnn\r\n\r\nfrom tensorflow.examples.tutorials.mn"
  },
  {
    "path": "Chapter07/Python 2.7/gpu_computing_with_multiple_GPU.py",
    "chars": 952,
    "preview": "import numpy as np\r\nimport tensorflow as tf\r\nimport datetime\r\n\r\nlog_device_placement = True\r\nn = 10\r\n\r\nA = np.random.ran"
  },
  {
    "path": "Chapter07/Python 2.7/gpu_example.py",
    "chars": 961,
    "preview": "import numpy as np\r\nimport tensorflow as tf\r\nimport datetime\r\n\r\nlog_device_placement = True\r\n\r\nn = 10\r\n\r\nA = np.random.r"
  },
  {
    "path": "Chapter07/Python 2.7/gpu_soft_placemnet_1.py",
    "chars": 955,
    "preview": "import numpy as np\r\nimport tensorflow as tf\r\nimport datetime\r\n\r\nlog_device_placement = True\r\nn = 10\r\n\r\nA = np.random.ran"
  },
  {
    "path": "Chapter07/Python 3.5/gpu_computing_with_multiple_GPU.py",
    "chars": 896,
    "preview": "import numpy as np\r\nimport tensorflow as tf\r\nimport datetime\r\n\r\nlog_device_placement = True\r\nn = 10\r\n\r\nA = np.random.ran"
  },
  {
    "path": "Chapter07/Python 3.5/gpu_example.py",
    "chars": 961,
    "preview": "import numpy as np\r\nimport tensorflow as tf\r\nimport datetime\r\n\r\nlog_device_placement = True\r\nn = 10\r\nA = np.random.rand("
  },
  {
    "path": "Chapter07/Python 3.5/gpu_soft_placemnet_1.py",
    "chars": 889,
    "preview": "import numpy as np\r\nimport tensorflow as tf\r\nimport datetime\r\n\r\nlog_device_placement = True\r\nn = 10\r\n\r\nA = np.random.ran"
  },
  {
    "path": "Chapter08/Python 2.7/digit_classifier.py",
    "chars": 3560,
    "preview": "from six.moves import xrange  \r\nimport tensorflow as tf\r\nimport prettytensor as pt\r\nfrom prettytensor.tutorial import da"
  },
  {
    "path": "Chapter08/Python 2.7/keras_movie_classifier_1.py",
    "chars": 1301,
    "preview": "import numpy\r\nfrom keras.datasets import imdb\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom"
  },
  {
    "path": "Chapter08/Python 2.7/keras_movie_classifier_using_convLayer_1.py",
    "chars": 1562,
    "preview": "from __future__ import print_function\r\n\r\nimport numpy\r\nfrom keras.datasets import imdb\r\nfrom keras.models import Sequent"
  },
  {
    "path": "Chapter08/Python 2.7/pretty_tensor_digit_1.py",
    "chars": 3301,
    "preview": "import tensorflow as tf\r\nimport prettytensor as pt\r\nfrom prettytensor.tutorial import data_utils\r\n\r\ntf.app.flags.DEFINE_"
  },
  {
    "path": "Chapter08/Python 2.7/tflearn_titanic_classifier.py",
    "chars": 896,
    "preview": "from tflearn.datasets import titanic\r\ntitanic.download_dataset('titanic_dataset.csv')\r\nfrom tflearn.data_utils import lo"
  },
  {
    "path": "Chapter08/Python 3.5/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "Chapter08/Python 3.5/digit_classifier.py",
    "chars": 3655,
    "preview": "from six.moves import range  \r\nimport tensorflow as tf\r\nimport prettytensor as pt\r\nfrom prettytensor.tutorial import dat"
  },
  {
    "path": "Chapter08/Python 3.5/keras_movie_classifier_1.py",
    "chars": 1299,
    "preview": "import numpy\r\nfrom keras.datasets import imdb\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom"
  },
  {
    "path": "Chapter08/Python 3.5/keras_movie_classifier_using_convLayer_1.py",
    "chars": 1562,
    "preview": "from __future__ import print_function\r\n\r\nimport numpy\r\nfrom keras.datasets import imdb\r\nfrom keras.models import Sequent"
  },
  {
    "path": "Chapter08/Python 3.5/pretty_tensor_digit_1.py",
    "chars": 3336,
    "preview": "import tensorflow as tf\r\nimport prettytensor as pt\r\nfrom prettytensor.tutorial import data_utils\r\n\r\ntf.app.flags.DEFINE_"
  },
  {
    "path": "Chapter08/Python 3.5/tflearn_titanic_classifier.py",
    "chars": 1042,
    "preview": "import tflearn\r\nfrom tflearn.datasets import titanic\r\nimport numpy as np\r\ntitanic.download_dataset('titanic_dataset.csv'"
  },
  {
    "path": "Chapter08/data/titanic_dataset.csv",
    "chars": 82865,
    "preview": "survived,pclass,name,sex,age,sibsp,parch,ticket,fare\r\n1,1,\"Allen, Miss. Elisabeth Walton\",female,29,0,0,24160,211.3375\r\n"
  },
  {
    "path": "Chapter09/Python 2.7/classify_image.py",
    "chars": 1237,
    "preview": "import tensorflow as tf, sys\n\n# You will be sending the image to be classified as a parameter\nprovided_image_path = sys."
  },
  {
    "path": "Chapter09/Python 3.5/classify_image.py",
    "chars": 1237,
    "preview": "import tensorflow as tf, sys\n\n# You will be sending the image to be classified as a parameter\nprovided_image_path = sys."
  },
  {
    "path": "Chapter10/Python 2.7/FrozenLake_1.py",
    "chars": 1093,
    "preview": "import gym\r\nimport numpy as np\r\n\r\nenv = gym.make('FrozenLake-v0')\r\n\r\n#Initialize table with all zeros\r\nQ = np.zeros([env"
  },
  {
    "path": "Chapter10/Python 2.7/Q_Learning_1.py",
    "chars": 2080,
    "preview": "import gym\r\nimport numpy as np\r\nimport random\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\n\r\n#Define the F"
  },
  {
    "path": "Chapter10/Python 3.5/FrozenLake_1.py",
    "chars": 1093,
    "preview": "import gym\r\nimport numpy as np\r\n\r\nenv = gym.make('FrozenLake-v0')\r\n\r\n#Initialize table with all zeros\r\nQ = np.zeros([env"
  },
  {
    "path": "Chapter10/Python 3.5/Q_Learning_1.py",
    "chars": 2080,
    "preview": "import gym\r\nimport numpy as np\r\nimport random\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\n\r\n#Define the F"
  },
  {
    "path": "LICENSE",
    "chars": 1070,
    "preview": "MIT License\n\nCopyright (c) 2017 Deeptituscano\n\nPermission is hereby granted, free of charge, to any person obtaining a c"
  },
  {
    "path": "README.md",
    "chars": 3348,
    "preview": "\n\n\n# Deep Learning with TensorFlow\nDeep Learning with TensorFlow by Packt\n\nThis is the code repository for [Deep Learnin"
  }
]

// ... and 2 more files (download for full content)

About this extraction

This page contains the full source code of the PacktPublishing/Deep-Learning-with-TensorFlow GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 80 files (44.2 MB), approximately 89.1k tokens, and a symbol index with 66 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!