[
  {
    "path": ".github/FUNDING.yml",
    "content": "# These are supported funding model platforms\n\ngithub: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]\npatreon: # Replace with a single Patreon username\nopen_collective: # Replace with a single Open Collective username\nko_fi: yihuihe\ntidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel\ncommunity_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry\nliberapay: # Replace with a single Liberapay username\nissuehunt: # Replace with a single IssueHunt username\notechie: # Replace with a single Otechie username\nlfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry\ncustom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']\n"
  },
  {
    "path": ".gitignore",
    "content": "*.pyc\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2018 Yihui He 何宜晖\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# GAN on MNIST with TensorFlow\n\n[GitHub - yihui-he/GAN-MNIST: Generative Adversarial Network for MNIST with tensorflow](https://github.com/yihui-he/GAN-MNIST)\n\n![Untitled](https://github.com/ethanhe42/GAN-MNIST/assets/10027339/8f39f2b6-b2dd-4f0b-9fbf-f33247b0b70e)\n\n\n![Untitled 1](https://github.com/ethanhe42/GAN-MNIST/assets/10027339/de4f99c4-f615-4954-9db1-e9883396dc3a)\n\n\n### Tensorflow implementation\n\n- All the codes in this project are mere replication of [Theano version](https://github.com/Newmu/dcgan_code)\n\n### Code\n\n- Under `face/` and `mnist/`\n- model.py\n- Definition of DCGAN model\n- train.py\n- Training the DCGAN model (and Generating samples time to time)\n- util.py\n- Image related utils\n\n### Dataset\n\n- MNIST\n- http://yann.lecun.com/exdb/mnist/\n- CelebA Face dataset\n- http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html\n- Download “img_align_celeba” images\n- Set “face_image_path” in train.py according to the path of downloaded dataset\n\n### references\n\nhttps://github.com/carpedm20/DCGAN-tensorflow\n\n### Citation\n\nIf you find the code useful in your research, please consider citing:\n\n```\n@InProceedings{He_2017_ICCV,\nauthor = {He, Yihui and Zhang, Xiangyu and Sun, Jian},\ntitle = {Channel Pruning for Accelerating Very Deep Neural Networks},\nbooktitle = {The IEEE International Conference on Computer Vision (ICCV)},\nmonth = {Oct},\nyear = {2017}\n}\n```\n"
  },
  {
    "path": "face/model.py",
    "content": "#-*- coding: utf-8 -*-\nimport tensorflow as tf\nimport ipdb\n\ndef batchnormalize(X, eps=1e-8, g=None, b=None):\n    if X.get_shape().ndims == 4:\n        mean = tf.reduce_mean(X, [0,1,2])\n        std = tf.reduce_mean( tf.square(X-mean), [0,1,2] )\n        X = (X-mean) / tf.sqrt(std+eps)\n\n        if g is not None and b is not None:\n            g = tf.reshape(g, [1,1,1,-1])\n            b = tf.reshape(b, [1,1,1,-1])\n            X = X*g + b\n\n    elif X.get_shape().ndims == 2:\n        mean = tf.reduce_mean(X, 0)\n        std = tf.reduce_mean(tf.square(X-mean), 0)\n        X = (X-mean) / tf.sqrt(std+eps)#std\n\n        if g is not None and b is not None:\n            g = tf.reshape(g, [1,-1])\n            b = tf.reshape(b, [1,-1])\n            X = X*g + b\n\n    else:\n        raise NotImplementedError\n\n    return X\n\ndef lrelu(X, leak=0.2):\n    f1 = 0.5 * (1 + leak)\n    f2 = 0.5 * (1 - leak)\n    return f1 * X + f2 * tf.abs(X)\n\ndef bce(o, t):\n    o = tf.clip_by_value(o, 1e-7, 1. - 1e-7)\n    return -(t * tf.log(o) + (1.- t)*tf.log(1. - o))\n\nclass DCGAN():\n    def __init__(\n            self,\n            batch_size=100,\n            image_shape=[64,64,3],\n            dim_z=100,\n            dim_W1=1024,\n            dim_W2=512,\n            dim_W3=256,\n            dim_W4=128,\n            dim_W5=3,\n            ):\n\n        self.batch_size = batch_size\n        self.image_shape = image_shape\n        self.dim_z = dim_z\n\n        self.dim_W1 = dim_W1\n        self.dim_W2 = dim_W2\n        self.dim_W3 = dim_W3\n        self.dim_W4 = dim_W4\n        self.dim_W5 = dim_W5\n\n        self.gen_W1 = tf.Variable(tf.random_normal([dim_z, dim_W1*4*4], stddev=0.02), name='gen_W1')\n        self.gen_bn_g1 = tf.Variable( tf.random_normal([dim_W1*4*4], mean=1.0, stddev=0.02), name='gen_bn_g1')\n        self.gen_bn_b1 = tf.Variable( tf.zeros([dim_W1*4*4]), name='gen_bn_b1')\n\n        self.gen_W2 = tf.Variable(tf.random_normal([5,5,dim_W2, dim_W1], stddev=0.02), name='gen_W2')\n        self.gen_bn_g2 = tf.Variable( tf.random_normal([dim_W2], mean=1.0, stddev=0.02), name='gen_bn_g2')\n        self.gen_bn_b2 = tf.Variable( tf.zeros([dim_W2]), name='gen_bn_b2')\n\n        self.gen_W3 = tf.Variable(tf.random_normal([5,5,dim_W3, dim_W2], stddev=0.02), name='gen_W3')\n        self.gen_bn_g3 = tf.Variable( tf.random_normal([dim_W3], mean=1.0, stddev=0.02), name='gen_bn_g3')\n        self.gen_bn_b3 = tf.Variable( tf.zeros([dim_W3]), name='gen_bn_b3')\n\n        self.gen_W4 = tf.Variable(tf.random_normal([5,5,dim_W4, dim_W3], stddev=0.02), name='gen_W4')\n        self.gen_bn_g4 = tf.Variable( tf.random_normal([dim_W4], mean=1.0, stddev=0.02), name='gen_bn_g4')\n        self.gen_bn_b4 = tf.Variable( tf.zeros([dim_W4]), name='gen_bn_b4')\n\n        self.gen_W5 = tf.Variable(tf.random_normal([5,5,dim_W5, dim_W4], stddev=0.02), name='gen_W5')\n\n        self.discrim_W1 = tf.Variable(tf.random_normal([5,5,dim_W5,dim_W4], stddev=0.02), name='discrim_W1')\n\n        self.discrim_W2 = tf.Variable(tf.random_normal([5,5,dim_W4,dim_W3], stddev=0.02), name='discrim_W2')\n        self.discrim_bn_g2 = tf.Variable( tf.random_normal([dim_W3], mean=1.0, stddev=0.02), name='discrim_bn_g2')\n        self.discrim_bn_b2 = tf.Variable( tf.zeros([dim_W3]), name='discrim_bn_b2')\n\n        self.discrim_W3 = tf.Variable(tf.random_normal([5,5,dim_W3,dim_W2], stddev=0.02), name='discrim_W3')\n        self.discrim_bn_g3 = tf.Variable( tf.random_normal([dim_W2], mean=1.0, stddev=0.02), name='discrim_bn_g3')\n        self.discrim_bn_b3 = tf.Variable( tf.zeros([dim_W2]), name='discrim_bn_b3')\n\n        self.discrim_W4 = tf.Variable(tf.random_normal([5,5,dim_W2,dim_W1], stddev=0.02), name='discrim_W4')\n        self.discrim_bn_g4 = tf.Variable( tf.random_normal([dim_W1], mean=1.0, stddev=0.02), name='discrim_bn_g4')\n        self.discrim_bn_b4 = tf.Variable( tf.zeros([dim_W1]), name='discrim_bn_b4')\n\n        self.discrim_W5 = tf.Variable(tf.random_normal([4*4*dim_W1,1], stddev=0.02), name='discrim_W5')\n\n        self.gen_params = [\n                self.gen_W1, self.gen_bn_g1, self.gen_bn_b1,\n                self.gen_W2, self.gen_bn_g2, self.gen_bn_b2,\n                self.gen_W3, self.gen_bn_g3, self.gen_bn_b3,\n                self.gen_W4, self.gen_bn_g4, self.gen_bn_b4,\n                self.gen_W5\n                ]\n\n        self.discrim_params = [\n                self.discrim_W1,\n                self.discrim_W2, self.discrim_bn_g2, self.discrim_bn_b2,\n                self.discrim_W3, self.discrim_bn_g3, self.discrim_bn_b3,\n                self.discrim_W4, self.discrim_bn_g4, self.discrim_bn_b4,\n                self.discrim_W5\n                ]\n\n    def build_model(self):\n\n        Z = tf.placeholder(tf.float32, [self.batch_size, self.dim_z])\n\n        image_real = tf.placeholder(tf.float32, [self.batch_size]+self.image_shape)\n        image_gen = self.generate(Z)\n\n        p_real, h_real = self.discriminate(image_real)\n        p_gen, h_gen = self.discriminate(image_gen)\n\n        discrim_cost_real = bce(p_real, tf.ones_like(p_real))\n        discrim_cost_gen = bce(p_gen, tf.zeros_like(p_gen))\n        discrim_cost = tf.reduce_mean(discrim_cost_real) + tf.reduce_mean(discrim_cost_gen)\n\n        gen_cost = tf.reduce_mean(bce( p_gen, tf.ones_like(p_gen) ))\n\n        return Z, image_real, discrim_cost, gen_cost, p_real, p_gen, h_real, h_gen\n\n    def discriminate(self, image):\n        h1 = lrelu( tf.nn.conv2d( image, self.discrim_W1, strides=[1,2,2,1], padding='SAME' ))\n        h2 = lrelu( batchnormalize( tf.nn.conv2d( h1, self.discrim_W2, strides=[1,2,2,1], padding='SAME'), g=self.discrim_bn_g2, b=self.discrim_bn_b2) )\n        h3 = lrelu( batchnormalize( tf.nn.conv2d( h2, self.discrim_W3, strides=[1,2,2,1], padding='SAME'), g=self.discrim_bn_g3, b=self.discrim_bn_b3) )\n        h4 = lrelu( batchnormalize( tf.nn.conv2d( h3, self.discrim_W4, strides=[1,2,2,1], padding='SAME'), g=self.discrim_bn_g4, b=self.discrim_bn_b4) )\n        h4 = tf.reshape(h4, [self.batch_size, -1])\n        h5 = tf.matmul( h4, self.discrim_W5 )\n        y = tf.nn.sigmoid(h5)\n        return y, h5\n\n    def generate(self, Z):\n        h1 = tf.nn.relu(batchnormalize(tf.matmul(Z, self.gen_W1), g=self.gen_bn_g1, b=self.gen_bn_b1))\n        h1 = tf.reshape(h1, [self.batch_size,4,4,self.dim_W1])\n\n        output_shape_l2 = [self.batch_size,8,8,self.dim_W2]\n        h2 = tf.nn.conv2d_transpose(h1, self.gen_W2, output_shape=output_shape_l2, strides=[1,2,2,1])\n        h2 = tf.nn.relu( batchnormalize(h2, g=self.gen_bn_g2, b=self.gen_bn_b2) )\n\n        output_shape_l3 = [self.batch_size,16,16,self.dim_W3]\n        h3 = tf.nn.conv2d_transpose(h2, self.gen_W3, output_shape=output_shape_l3, strides=[1,2,2,1])\n        h3 = tf.nn.relu( batchnormalize(h3, g=self.gen_bn_g3, b=self.gen_bn_b3) )\n\n        output_shape_l4 = [self.batch_size,32,32,self.dim_W4]\n        h4 = tf.nn.conv2d_transpose(h3, self.gen_W4, output_shape=output_shape_l4, strides=[1,2,2,1])\n        h4 = tf.nn.relu( batchnormalize(h4, g=self.gen_bn_g4, b=self.gen_bn_b4) )\n\n        output_shape_l5 = [self.batch_size,64,64,self.dim_W5]\n        h5 = tf.nn.conv2d_transpose(h4, self.gen_W5, output_shape=output_shape_l5, strides=[1,2,2,1])\n\n        x = tf.nn.tanh(h5)\n        return x\n\n    def samples_generator(self, batch_size):\n\n        Z = tf.placeholder(tf.float32, [batch_size, self.dim_z])\n        h1 = tf.nn.relu(batchnormalize(tf.matmul(Z, self.gen_W1)))\n        h1 = tf.reshape(h1, [batch_size,4,4,self.dim_W1])\n\n        output_shape_l2 = [batch_size,8,8,self.dim_W2]\n        h2 = tf.nn.conv2d_transpose(h1, self.gen_W2, output_shape=output_shape_l2, strides=[1,2,2,1])\n        h2 = tf.nn.relu( batchnormalize(h2) )\n\n        output_shape_l3 = [batch_size,16,16,self.dim_W3]\n        h3 = tf.nn.conv2d_transpose(h2, self.gen_W3, output_shape=output_shape_l3, strides=[1,2,2,1])\n        h3 = tf.nn.relu( batchnormalize(h3) )\n\n        output_shape_l4 = [batch_size,32,32,self.dim_W4]\n        h4 = tf.nn.conv2d_transpose(h3, self.gen_W4, output_shape=output_shape_l4, strides=[1,2,2,1])\n        h4 = tf.nn.relu( batchnormalize(h4) )\n\n        output_shape_l5 = [batch_size,64,64,self.dim_W5]\n        h5 = tf.nn.conv2d_transpose(h4, self.gen_W5, output_shape=output_shape_l5, strides=[1,2,2,1])\n\n        x = tf.nn.tanh(h5)\n        return Z, x\n\n"
  },
  {
    "path": "face/train.py",
    "content": "import ipdb\nimport os\nimport pandas as pd\nimport numpy as np\nfrom model import *\nfrom util import *\n\nn_epochs = 100\nlearning_rate = 0.0002\nbatch_size = 128\nimage_shape = [64,64,3]\ndim_z = 100\ndim_W1 = 1024\ndim_W2 = 512\ndim_W3 = 256\ndim_W4 = 128\ndim_W5 = 3\n\nvisualize_dim=196\n\nface_image_path = '/media/storage3/Study/data/celeb/img_align_celeba'\nface_images = filter(lambda x: x.endswith('jpg'), os.listdir(face_image_path))\n\ndcgan_model = DCGAN(\n        batch_size=batch_size,\n        image_shape=image_shape,\n        dim_z=dim_z,\n        dim_W1=dim_W1,\n        dim_W2=dim_W2,\n        dim_W3=dim_W3,\n        dim_W4=dim_W4,\n        dim_W5=dim_W5\n        )\n\nZ_tf, image_tf, d_cost_tf, g_cost_tf, p_real, p_gen, h_real, h_gen = dcgan_model.build_model()\nsess = tf.InteractiveSession()\nsaver = tf.train.Saver(max_to_keep=10)\n\ndiscrim_vars = filter(lambda x: x.name.startswith('discrim'), tf.trainable_variables())\ngen_vars = filter(lambda x: x.name.startswith('gen'), tf.trainable_variables())\n\ntrain_op_discrim = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(d_cost_tf, var_list=discrim_vars)\ntrain_op_gen = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(g_cost_tf, var_list=gen_vars)\n\nZ_tf_sample, image_tf_sample = dcgan_model.samples_generator(batch_size=visualize_dim)\n\ntf.initialize_all_variables().run()\n\nZ_np_sample = np.random.uniform(-1, 1, size=(visualize_dim,dim_z))\niterations = 0\nk = 2\n\nfor epoch in range(n_epochs):\n    np.random.shuffle(face_images)\n\n    for start, end in zip(\n            range(0, len(face_images), batch_size),\n            range(batch_size, len(face_images), batch_size)\n            ):\n\n        batch_image_files = face_images[start:end]\n        batch_images = map(lambda x: crop_resize( os.path.join( face_image_path, x) ), batch_image_files)\n        batch_images = np.array(batch_images).astype(np.float32)\n        batch_z = np.random.uniform(-1, 1, size=[batch_size, dim_z]).astype(np.float32)\n\n        if np.mod( iterations, k ) == 0:\n            _, gen_loss_val = sess.run(\n                    [train_op_gen, g_cost_tf],\n                    feed_dict={\n                        Z_tf:batch_z,\n                        })\n            discrim_loss_val, p_real_val, p_gen_val, h_real_val, h_gen_val = sess.run([d_cost_tf,p_real,p_gen, h_real, h_gen], feed_dict={Z_tf:batch_z, image_tf:batch_images})\n            print(\"=========== updating G ==========\")\n            print(\"iteration:\", iterations)\n            print(\"gen loss:\", gen_loss_val)\n            print(\"discrim loss:\", discrim_loss_val)\n\n        else:\n            _, discrim_loss_val = sess.run(\n                    [train_op_discrim, d_cost_tf],\n                    feed_dict={\n                        Z_tf:batch_z,\n                        image_tf:batch_images\n                        })\n            gen_loss_val, p_real_val, p_gen_val, h_real_val, h_gen_val = sess.run([g_cost_tf, p_real, p_gen, h_real, h_gen], feed_dict={Z_tf:batch_z, image_tf:batch_images})\n            print(\"=========== updating D ==========\")\n            print(\"iteration:\", iterations)\n            print(\"gen loss:\", gen_loss_val)\n            print(\"discrim loss:\", discrim_loss_val)\n\n        ipdb.set_trace()\n\n        if np.mod(iterations, 100) == 0:\n            generated_samples = sess.run(\n                    image_tf_sample,\n                    feed_dict={\n                        Z_tf_sample:Z_np_sample\n                        })\n            generated_samples = (generated_samples + 1.)/2.\n            save_visualization(generated_samples, (14,14), save_path='./vis/sample_'+str(iterations/100)+'.jpg')\n\n        iterations += 1\n\n"
  },
  {
    "path": "face/util.py",
    "content": "import cv2\nimport scipy.misc\nimport ipdb\nimport numpy as np\n\ndef crop_resize(image_path, resize_shape=(64,64)):\n    image = cv2.imread(image_path)\n    height, width, channel = image.shape\n\n    if width == height:\n        resized_image = cv2.resize(image, resize_shape)\n    elif width > height:\n        resized_image = cv2.resize(image, (int(width * float(resize_shape[0])/height), resize_shape[1]))\n        cropping_length = int( (resized_image.shape[1] - resize_shape[0]) / 2)\n        resized_image = resized_image[:,cropping_length:cropping_length+resize_shape[1]]\n    else:\n        resized_image = cv2.resize(image, (resize_shape[0], int(height * float(resize_shape[1])/width)))\n        cropping_length = int( (resized_image.shape[0] - resize_shape[1]) / 2)\n        resized_image = resized_image[cropping_length:cropping_length+resize_shape[0], :]\n\n    return resized_image/127.5 - 1\n\ndef save_visualization(X, (nh, nw), save_path='./vis/sample.jpg'):\n    h,w = X.shape[1], X.shape[2]\n    img = np.zeros((h * nh, w * nw, 3))\n\n    for n,x in enumerate(X):\n        j = n / nw\n        i = n % nw\n        img[j*h:j*h+h, i*w:i*w+w, :] = x\n\n    scipy.misc.imsave(save_path, img)\n"
  },
  {
    "path": "lsun/model.py",
    "content": "#-*- coding: utf-8 -*-\nimport tensorflow as tf\nimport ipdb\n\ndef batchnormalize(X, eps=1e-8, g=None, b=None):\n    if X.get_shape().ndims == 4:\n        mean = tf.reduce_mean(X, [0,1,2])\n        std = tf.reduce_mean( tf.square(X-mean), [0,1,2] )\n        X = (X-mean) / tf.sqrt(std+eps)\n\n        if g is not None and b is not None:\n            g = tf.reshape(g, [1,1,1,-1])\n            b = tf.reshape(b, [1,1,1,-1])\n            X = X*g + b\n\n    elif X.get_shape().ndims == 2:\n        mean = tf.reduce_mean(X, 0)\n        std = tf.reduce_mean(tf.square(X-mean), 0)\n        X = (X-mean) / tf.sqrt(std+eps)#std\n\n        if g is not None and b is not None:\n            g = tf.reshape(g, [1,-1])\n            b = tf.reshape(b, [1,-1])\n            X = X*g + b\n\n    else:\n        raise NotImplementedError\n\n    return X\n\ndef lrelu(X, leak=0.2):\n    f1 = 0.5 * (1 + leak)\n    f2 = 0.5 * (1 - leak)\n    return f1 * X + f2 * tf.abs(X)\n\ndef bce(o, t):\n    o = tf.clip_by_value(o, 1e-7, 1. - 1e-7)\n    return -(t * tf.log(o) + (1.- t)*tf.log(1. - o))\n\nclass DCGAN():\n    def __init__(\n            self,\n            batch_size=100,\n            image_shape=[64,64,3],\n            dim_z=100,\n            dim_W1=1024,\n            dim_W2=512,\n            dim_W3=256,\n            dim_W4=128,\n            dim_W5=3,\n            ):\n\n        self.batch_size = batch_size\n        self.image_shape = image_shape\n        self.dim_z = dim_z\n\n        self.dim_W1 = dim_W1\n        self.dim_W2 = dim_W2\n        self.dim_W3 = dim_W3\n        self.dim_W4 = dim_W4\n        self.dim_W5 = dim_W5\n\n        self.gen_W1 = tf.Variable(tf.truncated_normal([dim_z, dim_W1*4*4], stddev=0.02), name='gen_W1')\n        self.gen_bn_g1 = tf.Variable( tf.truncated_normal([dim_W1*4*4], mean=1.0, stddev=0.02), name='gen_bn_g1')\n        self.gen_bn_b1 = tf.Variable( tf.zeros([dim_W1*4*4]), name='gen_bn_b1')\n\n        self.gen_W2 = tf.Variable(tf.truncated_normal([5,5,dim_W2, dim_W1], stddev=0.02), name='gen_W2')\n        self.gen_bn_g2 = tf.Variable( tf.truncated_normal([dim_W2], mean=1.0, stddev=0.02), name='gen_bn_g2')\n        self.gen_bn_b2 = tf.Variable( tf.zeros([dim_W2]), name='gen_bn_b2')\n\n        self.gen_W3 = tf.Variable(tf.truncated_normal([5,5,dim_W3, dim_W2], stddev=0.02), name='gen_W3')\n        self.gen_bn_g3 = tf.Variable( tf.truncated_normal([dim_W3], mean=1.0, stddev=0.02), name='gen_bn_g3')\n        self.gen_bn_b3 = tf.Variable( tf.zeros([dim_W3]), name='gen_bn_b3')\n\n        self.gen_W4 = tf.Variable(tf.truncated_normal([5,5,dim_W4, dim_W3], stddev=0.02), name='gen_W4')\n        self.gen_bn_g4 = tf.Variable( tf.truncated_normal([dim_W4], mean=1.0, stddev=0.02), name='gen_bn_g4')\n        self.gen_bn_b4 = tf.Variable( tf.zeros([dim_W4]), name='gen_bn_b4')\n\n        self.gen_W5 = tf.Variable(tf.truncated_normal([5,5,dim_W5, dim_W4], stddev=0.02), name='gen_W5')\n\n        self.discrim_W1 = tf.Variable(tf.truncated_normal([5,5,dim_W5,dim_W4], stddev=0.02), name='discrim_W1')\n\n        self.discrim_W2 = tf.Variable(tf.truncated_normal([5,5,dim_W4,dim_W3], stddev=0.02), name='discrim_W2')\n        self.discrim_bn_g2 = tf.Variable( tf.truncated_normal([dim_W3], mean=1.0, stddev=0.02), name='discrim_bn_g2')\n        self.discrim_bn_b2 = tf.Variable( tf.zeros([dim_W3]), name='discrim_bn_b2')\n\n        self.discrim_W3 = tf.Variable(tf.truncated_normal([5,5,dim_W3,dim_W2], stddev=0.02), name='discrim_W3')\n        self.discrim_bn_g3 = tf.Variable( tf.truncated_normal([dim_W2], mean=1.0, stddev=0.02), name='discrim_bn_g3')\n        self.discrim_bn_b3 = tf.Variable( tf.zeros([dim_W2]), name='discrim_bn_b3')\n\n        self.discrim_W4 = tf.Variable(tf.truncated_normal([5,5,dim_W2,dim_W1], stddev=0.02), name='discrim_W4')\n        self.discrim_bn_g4 = tf.Variable( tf.truncated_normal([dim_W1], mean=1.0, stddev=0.02), name='discrim_bn_g4')\n        self.discrim_bn_b4 = tf.Variable( tf.zeros([dim_W1]), name='discrim_bn_b4')\n\n        self.discrim_W5 = tf.Variable(tf.truncated_normal([4*4*dim_W1,1], stddev=0.02), name='discrim_W5')\n\n        self.gen_params = [\n                self.gen_W1, self.gen_bn_g1, self.gen_bn_b1,\n                self.gen_W2, self.gen_bn_g2, self.gen_bn_b2,\n                self.gen_W3, self.gen_bn_g3, self.gen_bn_b3,\n                self.gen_W4, self.gen_bn_g4, self.gen_bn_b4,\n                self.gen_W5\n                ]\n\n        self.discrim_params = [\n                self.discrim_W1,\n                self.discrim_W2, self.discrim_bn_g2, self.discrim_bn_b2,\n                self.discrim_W3, self.discrim_bn_g3, self.discrim_bn_b3,\n                self.discrim_W4, self.discrim_bn_g4, self.discrim_bn_b4,\n                self.discrim_W5\n                ]\n\n    def build_model(self):\n\n        Z = tf.placeholder(tf.float32, [self.batch_size, self.dim_z])\n\n        image_real = tf.placeholder(tf.float32, [self.batch_size]+self.image_shape)\n        image_gen = self.generate(Z)\n\n        p_real, h_real = self.discriminate(image_real)\n        p_gen, h_gen = self.discriminate(image_gen)\n\n        discrim_cost_real = bce(p_real, tf.ones_like(p_real))\n        discrim_cost_gen = bce(p_gen, tf.zeros_like(p_gen))\n        discrim_cost = tf.reduce_mean(discrim_cost_real) + tf.reduce_mean(discrim_cost_gen)\n\n        gen_cost = tf.reduce_mean(bce( p_gen, tf.ones_like(p_gen) ))\n\n        return Z, image_real, discrim_cost, gen_cost, p_real, p_gen, h_real, h_gen\n\n    def discriminate(self, image):\n        h1 = lrelu( tf.nn.conv2d( image, self.discrim_W1, strides=[1,2,2,1], padding='SAME' ))\n        h2 = lrelu( batchnormalize( tf.nn.conv2d( h1, self.discrim_W2, strides=[1,2,2,1], padding='SAME'), g=self.discrim_bn_g2, b=self.discrim_bn_b2) )\n        h3 = lrelu( batchnormalize( tf.nn.conv2d( h2, self.discrim_W3, strides=[1,2,2,1], padding='SAME'), g=self.discrim_bn_g3, b=self.discrim_bn_b3) )\n        h4 = lrelu( batchnormalize( tf.nn.conv2d( h3, self.discrim_W4, strides=[1,2,2,1], padding='SAME'), g=self.discrim_bn_g4, b=self.discrim_bn_b4) )\n        h4 = tf.reshape(h4, [self.batch_size, -1])\n        h5 = tf.matmul( h4, self.discrim_W5 )\n        y = tf.nn.sigmoid(h5)\n        return y, h5\n\n    def generate(self, Z):\n        h1 = tf.nn.relu(batchnormalize(tf.matmul(Z, self.gen_W1), g=self.gen_bn_g1, b=self.gen_bn_b1))\n        h1 = tf.reshape(h1, [self.batch_size,4,4,self.dim_W1])\n\n        output_shape_l2 = [self.batch_size,8,8,self.dim_W2]\n        h2 = tf.nn.conv2d_transpose(h1, self.gen_W2, output_shape=output_shape_l2, strides=[1,2,2,1])\n        h2 = tf.nn.relu( batchnormalize(h2, g=self.gen_bn_g2, b=self.gen_bn_b2) )\n\n        output_shape_l3 = [self.batch_size,16,16,self.dim_W3]\n        h3 = tf.nn.conv2d_transpose(h2, self.gen_W3, output_shape=output_shape_l3, strides=[1,2,2,1])\n        h3 = tf.nn.relu( batchnormalize(h3, g=self.gen_bn_g3, b=self.gen_bn_b3) )\n\n        output_shape_l4 = [self.batch_size,32,32,self.dim_W4]\n        h4 = tf.nn.conv2d_transpose(h3, self.gen_W4, output_shape=output_shape_l4, strides=[1,2,2,1])\n        h4 = tf.nn.relu( batchnormalize(h4, g=self.gen_bn_g4, b=self.gen_bn_b4) )\n\n        output_shape_l5 = [self.batch_size,64,64,self.dim_W5]\n        h5 = tf.nn.conv2d_transpose(h4, self.gen_W5, output_shape=output_shape_l5, strides=[1,2,2,1])\n\n        x = tf.nn.tanh(h5)\n        return x\n\n    def samples_generator(self, batch_size):\n\n        Z = tf.placeholder(tf.float32, [batch_size, self.dim_z])\n        h1 = tf.nn.relu(batchnormalize(tf.matmul(Z, self.gen_W1)))\n        h1 = tf.reshape(h1, [batch_size,4,4,self.dim_W1])\n\n        output_shape_l2 = [batch_size,8,8,self.dim_W2]\n        h2 = tf.nn.conv2d_transpose(h1, self.gen_W2, output_shape=output_shape_l2, strides=[1,2,2,1])\n        h2 = tf.nn.relu( batchnormalize(h2) )\n\n        output_shape_l3 = [batch_size,16,16,self.dim_W3]\n        h3 = tf.nn.conv2d_transpose(h2, self.gen_W3, output_shape=output_shape_l3, strides=[1,2,2,1])\n        h3 = tf.nn.relu( batchnormalize(h3) )\n\n        output_shape_l4 = [batch_size,32,32,self.dim_W4]\n        h4 = tf.nn.conv2d_transpose(h3, self.gen_W4, output_shape=output_shape_l4, strides=[1,2,2,1])\n        h4 = tf.nn.relu( batchnormalize(h4) )\n\n        output_shape_l5 = [batch_size,64,64,self.dim_W5]\n        h5 = tf.nn.conv2d_transpose(h4, self.gen_W5, output_shape=output_shape_l5, strides=[1,2,2,1])\n\n        x = tf.nn.tanh(h5)\n        return Z, x\n\n"
  },
  {
    "path": "lsun/train.py",
    "content": "import ipdb\nimport os\nimport pandas as pd\nimport numpy as np\nimport lmdb\nfrom glob import glob\nfrom model import *\nfrom util import *\n\nn_epochs = 100\nlearning_rate = 0.0002\nbatch_size = 128\nimage_shape = [64,64,3]\ndim_z = 100\ndim_W1 = 512#1024\ndim_W2 = 256#512\ndim_W3 = 128#256\ndim_W4 = 64#128\ndim_W5 = 3\n\nvisualize_dim=196\n\nlsun_image_path = '/media/storage3/Study/data/lsun/images/0'\nlsun_images = []\nfor dir,_,_ in os.walk(lsun_image_path):\n    lsun_images.append(glob(os.path.join(dir, '*.jpg')))\n\nlsun_images = lsun_images[0]\ndcgan_model = DCGAN(\n        batch_size=batch_size,\n        image_shape=image_shape,\n        dim_z=dim_z,\n        dim_W1=dim_W1,\n        dim_W2=dim_W2,\n        dim_W3=dim_W3,\n        dim_W4=dim_W4,\n        dim_W5=dim_W5\n        )\n\nZ_tf, image_tf, d_cost_tf, g_cost_tf, p_real, p_gen, h_real, h_gen = dcgan_model.build_model()\nsess = tf.InteractiveSession()\nsaver = tf.train.Saver(max_to_keep=10)\n\ndiscrim_vars = filter(lambda x: x.name.startswith('discrim'), tf.trainable_variables())\ngen_vars = filter(lambda x: x.name.startswith('gen'), tf.trainable_variables())\n\ntrain_op_discrim = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(d_cost_tf, var_list=discrim_vars)\ntrain_op_gen = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(g_cost_tf, var_list=gen_vars)\n\nZ_tf_sample, image_tf_sample = dcgan_model.samples_generator(batch_size=visualize_dim)\n\ntf.initialize_all_variables().run()\n\nZ_np_sample = np.random.uniform(-1, 1, size=(visualize_dim,dim_z))\niterations = 0\nk = 2\n\nfor epoch in range(n_epochs):\n\n    for start, end in zip(\n            range(0, len(lsun_images), batch_size),\n            range(batch_size, len(lsun_images), batch_size)\n            ):\n\n        batch_image_files = lsun_images[start:end]\n        batch_images = map(lambda x: crop_resize( os.path.join( lsun_image_path, x) ), batch_image_files)\n        batch_images = np.array(batch_images).astype(np.float32)\n        batch_z = np.random.uniform(-1, 1, size=[batch_size, dim_z]).astype(np.float32)\n\n        p_real_val, p_gen_val, h_real_val, h_gen_val = sess.run([p_real, p_gen, h_real, h_gen], feed_dict={Z_tf:batch_z, image_tf:batch_images})\n        if np.mod( iterations, k ) != 0:\n            _, gen_loss_val = sess.run(\n                    [train_op_gen, g_cost_tf],\n                    feed_dict={\n                        Z_tf:batch_z,\n                        })\n            print \"=========== updating G ==========\"\n            print \"iteration:\", iterations\n            print \"gen loss:\", gen_loss_val\n\n        else:\n            _, discrim_loss_val = sess.run(\n                    [train_op_discrim, d_cost_tf],\n                    feed_dict={\n                        Z_tf:batch_z,\n                        image_tf:batch_images\n                        })\n            print \"=========== updating D ==========\"\n            print \"iteration:\", iterations\n            print \"discrim loss:\", discrim_loss_val\n\n        print \"real h:\", h_real_val.mean(),\"  gen h:\", h_gen_val.mean()\n\n        if np.mod(iterations, 100) == 0:\n            generated_samples = sess.run(\n                    image_tf_sample,\n                    feed_dict={\n                        Z_tf_sample:Z_np_sample\n                        })\n            generated_samples = (generated_samples + 1.)/2.\n            save_visualization(generated_samples, (14,14), save_path='./vis/sample_'+str(iterations/100)+'.jpg')\n\n        iterations += 1\n\n"
  },
  {
    "path": "lsun/util.py",
    "content": "import cv2\nimport scipy.misc\nimport ipdb\nimport numpy as np\n\ndef crop_resize(image_path, resize_shape=(64,64)):\n    image = cv2.imread(image_path)\n    height, width, channel = image.shape\n\n    if width == height:\n        resized_image = cv2.resize(image, resize_shape)\n    elif width > height:\n        resized_image = cv2.resize(image, (int(width * float(resize_shape[0])/height), resize_shape[1]))\n        cropping_length = int( (resized_image.shape[1] - resize_shape[0]) / 2)\n        resized_image = resized_image[:,cropping_length:cropping_length+resize_shape[1]]\n    else:\n        resized_image = cv2.resize(image, (resize_shape[0], int(height * float(resize_shape[1])/width)))\n        cropping_length = int( (resized_image.shape[0] - resize_shape[1]) / 2)\n        resized_image = resized_image[cropping_length:cropping_length+resize_shape[0], :]\n\n    return (resized_image - 127.5) / 127.5\n    #return resized_image/127.5 - 1\n\ndef save_visualization(X, (nh, nw), save_path='./vis/sample.jpg'):\n    h,w = X.shape[1], X.shape[2]\n    img = np.zeros((h * nh, w * nw, 3))\n\n    for n,x in enumerate(X):\n        j = n / nw\n        i = n % nw\n        img[j*h:j*h+h, i*w:i*w+w, :] = x\n\n    scipy.misc.imsave(save_path, img)\n"
  },
  {
    "path": "mnist/load.py",
    "content": "import sys\nsys.path.append('..')\n\nimport numpy as np\nimport os\n\ndata_dir = 'data/'\ndef mnist():\n    fd = open(os.path.join(data_dir,'train-images-idx3-ubyte'))\n    loaded = np.fromfile(file=fd,dtype=np.uint8)\n    trX = loaded[16:].reshape((60000,28*28)).astype(float)\n\n    fd = open(os.path.join(data_dir,'train-labels-idx1-ubyte'))\n    loaded = np.fromfile(file=fd,dtype=np.uint8)\n    trY = loaded[8:].reshape((60000))\n\n    fd = open(os.path.join(data_dir,'t10k-images-idx3-ubyte'))\n    loaded = np.fromfile(file=fd,dtype=np.uint8)\n    teX = loaded[16:].reshape((10000,28*28)).astype(float)\n\n    fd = open(os.path.join(data_dir,'t10k-labels-idx1-ubyte'))\n    loaded = np.fromfile(file=fd,dtype=np.uint8)\n    teY = loaded[8:].reshape((10000))\n\n    trY = np.asarray(trY)\n    teY = np.asarray(teY)\n\n    return trX, teX, trY, teY\n\ndef mnist_with_valid_set():\n    trX, teX, trY, teY = mnist()\n\n    train_inds = np.arange(len(trX))\n    np.random.shuffle(train_inds)\n    trX = trX[train_inds]\n    trY = trY[train_inds]\n    #trX, trY = shuffle(trX, trY)\n    vaX = trX[50000:]\n    vaY = trY[50000:]\n    trX = trX[:50000]\n    trY = trY[:50000]\n\n    return trX, vaX, teX, trY, vaY, teY\n"
  },
  {
    "path": "mnist/model.py",
    "content": "#-*- coding: utf-8 -*-\nimport tensorflow as tf\n\ndef batchnormalize(X, eps=1e-8, g=None, b=None):\n    if X.get_shape().ndims == 4:\n        mean = tf.reduce_mean(X, [0,1,2])\n        std = tf.reduce_mean( tf.square(X-mean), [0,1,2] )\n        X = (X-mean) / tf.sqrt(std+eps)\n\n        if g is not None and b is not None:\n            g = tf.reshape(g, [1,1,1,-1])\n            b = tf.reshape(b, [1,1,1,-1])\n            X = X*g + b\n\n    elif X.get_shape().ndims == 2:\n        mean = tf.reduce_mean(X, 0)\n        std = tf.reduce_mean(tf.square(X-mean), 0)\n        X = (X-mean) / tf.sqrt(std+eps)\n\n        if g is not None and b is not None:\n            g = tf.reshape(g, [1,-1])\n            b = tf.reshape(b, [1,-1])\n            X = X*g + b\n\n    else:\n        raise NotImplementedError\n\n    return X\n\ndef lrelu(X, leak=0.2):\n    f1 = 0.5 * (1 + leak)\n    f2 = 0.5 * (1 - leak)\n    return f1 * X + f2 * tf.abs(X)\n\ndef bce(o, t):\n    o = tf.clip_by_value(o, 1e-7, 1. 
- 1e-7)\n    return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=o, labels=t))\n\nclass DCGAN():\n    def __init__(\n            self,\n            batch_size=100,\n            image_shape=[28,28,1],\n            dim_z=100,\n            dim_y=10,\n            dim_W1=1024,\n            dim_W2=128,\n            dim_W3=64,\n            dim_channel=1,\n            ):\n\n        self.batch_size = batch_size\n        self.image_shape = image_shape\n        self.dim_z = dim_z\n        self.dim_y = dim_y\n\n        self.dim_W1 = dim_W1\n        self.dim_W2 = dim_W2\n        self.dim_W3 = dim_W3\n        self.dim_channel = dim_channel\n\n        self.gen_W1 = tf.Variable(tf.random_normal([dim_z+dim_y, dim_W1], stddev=0.02), name='gen_W1')\n        self.gen_W2 = tf.Variable(tf.random_normal([dim_W1+dim_y, dim_W2*7*7], stddev=0.02), name='gen_W2')\n        self.gen_W3 = tf.Variable(tf.random_normal([5,5,dim_W3,dim_W2+dim_y], stddev=0.02), name='gen_W3')\n        self.gen_W4 = tf.Variable(tf.random_normal([5,5,dim_channel,dim_W3+dim_y], stddev=0.02), name='gen_W4')\n\n        self.discrim_W1 = tf.Variable(tf.random_normal([5,5,dim_channel+dim_y,dim_W3], stddev=0.02), name='discrim_W1')\n        self.discrim_W2 = tf.Variable(tf.random_normal([5,5,dim_W3+dim_y,dim_W2], stddev=0.02), name='discrim_W2')\n        self.discrim_W3 = tf.Variable(tf.random_normal([dim_W2*7*7+dim_y,dim_W1], stddev=0.02), name='discrim_W3')\n        self.discrim_W4 = tf.Variable(tf.random_normal([dim_W1+dim_y,1], stddev=0.02), name='discrim_W4')\n\n    def build_model(self):\n\n        Z = tf.placeholder(tf.float32, [self.batch_size, self.dim_z])\n        Y = tf.placeholder(tf.float32, [self.batch_size, self.dim_y])\n\n        image_real = tf.placeholder(tf.float32, [self.batch_size]+self.image_shape)\n        h4 = self.generate(Z,Y)\n        image_gen = tf.nn.sigmoid(h4)\n        raw_real = self.discriminate(image_real, Y)\n        p_real = tf.nn.sigmoid(raw_real)\n        raw_gen = 
self.discriminate(image_gen, Y)\n        p_gen = tf.nn.sigmoid(raw_gen)\n        discrim_cost_real = bce(raw_real, tf.ones_like(raw_real))\n        discrim_cost_gen = bce(raw_gen, tf.zeros_like(raw_gen))\n        discrim_cost = discrim_cost_real + discrim_cost_gen\n\n        gen_cost = bce( raw_gen, tf.ones_like(raw_gen) )\n\n        return Z, Y, image_real, discrim_cost, gen_cost, p_real, p_gen\n\n    def discriminate(self, image, Y):\n        yb = tf.reshape(Y, tf.stack([self.batch_size, 1, 1, self.dim_y]))\n        X = tf.concat(axis=3, values=[image, yb*tf.ones([self.batch_size, 28, 28, self.dim_y])])\n\n        h1 = lrelu( tf.nn.conv2d( X, self.discrim_W1, strides=[1,2,2,1], padding='SAME' ))\n        h1 = tf.concat(axis=3, values=[h1, yb*tf.ones([self.batch_size, 14, 14, self.dim_y])])\n\n        h2 = lrelu( batchnormalize( tf.nn.conv2d( h1, self.discrim_W2, strides=[1,2,2,1], padding='SAME')) )\n        h2 = tf.reshape(h2, [self.batch_size, -1])\n        h2 = tf.concat(axis=1, values=[h2, Y])\n\n        h3 = lrelu( batchnormalize( tf.matmul(h2, self.discrim_W3 ) ))\n        h3 = tf.concat(axis=1, values=[h3, Y])\n        \n        h4 = lrelu(batchnormalize(tf.matmul(h3,self.discrim_W4)))\n        \n        return h4\n\n    def generate(self, Z, Y):\n\n        yb = tf.reshape(Y, [self.batch_size, 1, 1, self.dim_y])\n        Z = tf.concat(axis=1, values=[Z,Y])\n        h1 = tf.nn.relu(batchnormalize(tf.matmul(Z, self.gen_W1)))\n        h1 = tf.concat(axis=1, values=[h1, Y])\n        h2 = tf.nn.relu(batchnormalize(tf.matmul(h1, self.gen_W2)))\n        h2 = tf.reshape(h2, [self.batch_size,7,7,self.dim_W2])\n        h2 = tf.concat(axis=3, values=[h2, yb*tf.ones([self.batch_size, 7, 7, self.dim_y])])\n\n        output_shape_l3 = [self.batch_size,14,14,self.dim_W3]\n        h3 = tf.nn.conv2d_transpose(h2, self.gen_W3, output_shape=output_shape_l3, strides=[1,2,2,1])\n        h3 = tf.nn.relu( batchnormalize(h3) )\n        h3 = tf.concat(axis=3, values=[h3, 
yb*tf.ones([self.batch_size, 14,14,self.dim_y])] )\n\n        output_shape_l4 = [self.batch_size,28,28,self.dim_channel]\n        h4 = tf.nn.conv2d_transpose(h3, self.gen_W4, output_shape=output_shape_l4, strides=[1,2,2,1])\n        return h4\n\n    def samples_generator(self, batch_size):\n        Z = tf.placeholder(tf.float32, [batch_size, self.dim_z])\n        Y = tf.placeholder(tf.float32, [batch_size, self.dim_y])\n\n        yb = tf.reshape(Y, [batch_size, 1, 1, self.dim_y])\n        Z_ = tf.concat(axis=1, values=[Z,Y])\n        h1 = tf.nn.relu(batchnormalize(tf.matmul(Z_, self.gen_W1)))\n        h1 = tf.concat(axis=1, values=[h1, Y])\n        h2 = tf.nn.relu(batchnormalize(tf.matmul(h1, self.gen_W2)))\n        h2 = tf.reshape(h2, [batch_size,7,7,self.dim_W2])\n        h2 = tf.concat(axis=3, values=[h2, yb*tf.ones([batch_size, 7, 7, self.dim_y])])\n\n        output_shape_l3 = [batch_size,14,14,self.dim_W3]\n        h3 = tf.nn.conv2d_transpose(h2, self.gen_W3, output_shape=output_shape_l3, strides=[1,2,2,1])\n        h3 = tf.nn.relu( batchnormalize(h3) )\n        h3 = tf.concat(axis=3, values=[h3, yb*tf.ones([batch_size, 14,14,self.dim_y])] )\n\n        output_shape_l4 = [batch_size,28,28,self.dim_channel]\n        h4 = tf.nn.conv2d_transpose(h3, self.gen_W4, output_shape=output_shape_l4, strides=[1,2,2,1])\n        x = tf.nn.sigmoid(h4)\n        return Z,Y,x\n\n\n"
  },
  {
    "path": "mnist/train.py",
    "content": "import os\nimport numpy as np\nfrom model import *\nfrom util import *\nfrom load import mnist_with_valid_set\n\nn_epochs = 100\nlearning_rate = 0.0002\nbatch_size = 128\nimage_shape = [28,28,1]\ndim_z = 100\ndim_W1 = 1024\ndim_W2 = 128\ndim_W3 = 64\ndim_channel = 1\n\nvisualize_dim=196\n\ntrX, vaX, teX, trY, vaY, teY = mnist_with_valid_set()\n\ndcgan_model = DCGAN(\n        batch_size=batch_size,\n        image_shape=image_shape,\n        dim_z=dim_z,\n        dim_W1=dim_W1,\n        dim_W2=dim_W2,\n        dim_W3=dim_W3,\n        )\n\nZ_tf, Y_tf, image_tf, d_cost_tf, g_cost_tf, p_real, p_gen = dcgan_model.build_model()\nsess = tf.InteractiveSession()\nsaver = tf.train.Saver(max_to_keep=10)\n\ndiscrim_vars = filter(lambda x: x.name.startswith('discrim'), tf.trainable_variables())\ngen_vars = filter(lambda x: x.name.startswith('gen'), tf.trainable_variables())\ndiscrim_vars = [i for i in discrim_vars]\ngen_vars = [i for i in gen_vars]\n\ntrain_op_discrim = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(d_cost_tf, var_list=discrim_vars)\ntrain_op_gen = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(g_cost_tf, var_list=gen_vars)\n\nZ_tf_sample, Y_tf_sample, image_tf_sample = dcgan_model.samples_generator(batch_size=visualize_dim)\n\ntf.global_variables_initializer().run()\n\nZ_np_sample = np.random.uniform(-1, 1, size=(visualize_dim,dim_z))\nY_np_sample = OneHot( np.random.randint(10, size=[visualize_dim]))\niterations = 0\nk = 2\n\nstep = 200\n\nfor epoch in range(n_epochs):\n    index = np.arange(len(trY))\n    np.random.shuffle(index)\n    trX = trX[index]\n    trY = trY[index]\n\n    for start, end in zip(\n            range(0, len(trY), batch_size),\n            range(batch_size, len(trY), batch_size)\n            ):\n\n        Xs = trX[start:end].reshape( [-1, 28, 28, 1]) / 255.\n        Ys = OneHot(trY[start:end])\n        Zs = np.random.uniform(-1, 1, size=[batch_size, dim_z]).astype(np.float32)\n\n        if np.mod( 
iterations, k ) != 0:\n            _, gen_loss_val = sess.run(\n                    [train_op_gen, g_cost_tf],\n                    feed_dict={\n                        Z_tf:Zs,\n                        Y_tf:Ys\n                        })\n            discrim_loss_val, p_real_val, p_gen_val = sess.run([d_cost_tf,p_real,p_gen], feed_dict={Z_tf:Zs, image_tf:Xs, Y_tf:Ys})\n            print(\"=========== updating G ==========\")\n            print(\"iteration:\", iterations)\n            print(\"gen loss:\", gen_loss_val)\n            print(\"discrim loss:\", discrim_loss_val)\n\n        else:\n            _, discrim_loss_val = sess.run(\n                    [train_op_discrim, d_cost_tf],\n                    feed_dict={\n                        Z_tf:Zs,\n                        Y_tf:Ys,\n                        image_tf:Xs\n                        })\n            gen_loss_val, p_real_val, p_gen_val = sess.run([g_cost_tf, p_real, p_gen], feed_dict={Z_tf:Zs, image_tf:Xs, Y_tf:Ys})\n            print(\"=========== updating D ==========\")\n            print(\"iteration:\", iterations)\n            print(\"gen loss:\", gen_loss_val)\n            print(\"discrim loss:\", discrim_loss_val)\n\n        print(\"Average P(real)=\", p_real_val.mean())\n        print(\"Average P(gen)=\", p_gen_val.mean())\n\n        if np.mod(iterations, step) == 0:\n            generated_samples = sess.run(\n                    image_tf_sample,\n                    feed_dict={\n                        Z_tf_sample:Z_np_sample,\n                        Y_tf_sample:Y_np_sample\n                        })\n            generated_samples = (generated_samples + 1.)/2.\n            save_visualization(generated_samples, (14,14), save_path='./vis/sample_%04d.jpg' % int(iterations/step))\n\n        iterations += 1\n\n"
  },
  {
    "path": "mnist/util.py",
    "content": "import cv2\nimport scipy.misc\nimport numpy as np\n\ndef OneHot(X, n=None, negative_class=0.):\n    X = np.asarray(X).flatten()\n    if n is None:\n        n = np.max(X) + 1\n    Xoh = np.ones((len(X), n)) * negative_class\n    Xoh[np.arange(len(X)), X] = 1.\n    return Xoh\n\n\ndef crop_resize(image_path, resize_shape=(64,64)):\n    image = cv2.imread(image_path)\n    height, width, channel = image.shape\n\n    if width == height:\n        resized_image = cv2.resize(image, resize_shape)\n    elif width > height:\n        resized_image = cv2.resize(image, (int(width * float(resize_shape[0])//height), resize_shape[1]))\n        cropping_length = int( (resized_image.shape[1] - resize_shape[0]) // 2)\n        resized_image = resized_image[:,cropping_length:cropping_length+resize_shape[1]]\n    else:\n        resized_image = cv2.resize(image, (resize_shape[0], int(height * float(resize_shape[1])/width)))\n        cropping_length = int( (resized_image.shape[0] - resize_shape[1]) // 2)\n        resized_image = resized_image[cropping_length:cropping_length+resize_shape[0], :]\n\n    return resized_image/127.5 - 1\n\ndef save_visualization(X, nh_nw, save_path='./vis/sample.jpg'):\n    h,w = X.shape[1], X.shape[2]\n    img = np.zeros((h * nh_nw[0], w * nh_nw[1], 3))\n\n    for n,x in enumerate(X):\n        j = n // nh_nw[1]\n        i = n % nh_nw[1]\n        img[j*h:j*h+h, i*w:i*w+w, :] = x\n\n    scipy.misc.imsave(save_path, img)\n"
  }
]