[
  {
    "path": "Prediction/prediction_start.py",
    "content": "import keras\n\n"
  },
  {
    "path": "Processing/blur_utils.py",
    "content": ""
  },
  {
    "path": "Processing/processing_utils.py",
    "content": "'''\nutility function package for image processing\nby Sibo Zhu, Kieran Xiao Wang\n2017.08.24\n'''\nimport numpy as np\nimport cv2\nimport random\nfrom PIL import Image\nimport PIL\nfrom natsort import natsorted\nimport os, os.path\n\ndef save_image(image_np_array, image_save_path):\n    '''\n    !!! Sibo, Please complete this function !!!\n    :param image_np_array: 3-D numpy array\n    :param image_save_path: string\n    :return:\n    '''\n    im = Image.fromarray(image_np_array)\n    im.save(image_save_path)\n\n\ndef patch_merge_to_one_from_folder(patch_dir):\n    '''\n    merge patches into a whole image\n    !!! Sibo, please complete this function !!!\n    !!! now you are using naming for indicating patch location and blur/no blur, which is fine\n    !!! the patch_dir is supposed to contain all patches of an image(no matter blured or not)\n    !!! you may want to use os.walk\n    !!! if so avoid to have any other .jpg file except image patches(e.g. do not save the whole image in .jpg in that folder)\n    :param patch_dir: [string] directory to the folder that contains all patches(of an image)\n    :return: [3-d array] whole image in np array\n    '''\n    patch_dir = patch_dir\n\n    pi_imgs = []\n    valid_images = [\".jpg\"]\n    for f in os.listdir(patch_dir):\n        ext = os.path.splitext(f)[1]\n        if ext.lower() not in valid_images:\n            continue\n        pi_imgs.append(Image.open(os.path.join(patch_dir, f)))\n    total = len(pi_imgs)\n\n    print(str(len(pi_imgs)) + \" patches in total\")\n\n    #######################################################\n    \"\"\"Loading all the images from directory\"\"\"\n    dir = []\n    valid_images = [\".jpg\"]\n    for f in os.listdir(patch_dir):\n        ext = os.path.splitext(f)[1]\n        if ext.lower() not in valid_images:\n            continue\n        dir.append(os.path.join(patch_dir, f))\n\n    #####################################################\n    \"\"\"concat images with 
numpy\"\"\"\n\n    def concat_img_horizon(list_imgs):\n        imgs = [PIL.Image.open(i) for i in list_imgs]\n        # pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)\n        min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]\n        #\n        imgs_comb = np.hstack((np.asarray(i.resize(min_shape)) for i in imgs))\n        imgs_comb = PIL.Image.fromarray(imgs_comb)\n        return imgs_comb\n\n    def concat_img_vertical(list_imgs):\n        imgs = [PIL.Image.open(i) for i in list_imgs]\n        # pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)\n        min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]\n        imgs_comb = np.hstack((np.asarray(i.resize(min_shape)) for i in imgs))\n        # for a vertical stacking it is simple: use vstack\n        imgs_comb = np.vstack((np.asarray(i.resize(min_shape)) for i in imgs))\n        imgs_comb = PIL.Image.fromarray(imgs_comb)\n        return imgs_comb\n\n    def concat_temp_horizon(list_imgs):\n        imgs_comb = np.hstack((np.asarray(i) for i in list_imgs))\n        imgs_comb = PIL.Image.fromarray(imgs_comb)\n        return imgs_comb\n\n    #########################################################\n\n\n    \"\"\"counting number of whole pictures in the folder\"\"\"\n    max_index = 0\n    for i in range(len(dir)):\n        flag = int(dir[i].split(\"/\")[-1].split(\",\")[0].split(\"_\")[0])\n        if flag > max_index:\n            max_index = flag\n\n    #############################################\n    \"\"\"placing patches to their certain picture\"\"\"\n    pic_index = {}\n    for elem in range(max_index + 1):\n        pic_index[elem] = []\n\n    for j in range(len(dir)):\n        for k in range(len(pic_index)):\n            if int(dir[j].split(\"/\")[-1].split(\",\")[0].split(\"_\")[0]) == k:\n                pic_index[k].append(dir[j])\n\n    print('the first 
picture contains ' + str(len(pic_index[0])) + ' patches')\n\n    ##########################################\n    \"\"\"getting the total columns of picture\"\"\"\n\n    def get_total_column(list):\n        max_col = 0\n        for l in range(len(list)):\n            col_flag = int(pic_index[0][l].split(\"/\")[-1].split(\",\")[0].split(\"_\")[1])\n            if col_flag > max_col:\n                max_col = col_flag\n        return max_col\n\n    print (\"this picture's total column is \" + str(get_total_column(pic_index[0])))\n    #########################################\n    \"\"\"getting the total rows of picture\"\"\"\n\n    def get_total_row(list):\n        max_row = 0\n        for o in range(len(list)):\n            row_flag = int(list[o].split(\"/\")[-1].split(\",\")[0].split(\"_\")[2])\n            if row_flag > max_row:\n                max_row = row_flag\n        return max_row\n\n    print (\"this picture's total row is \" + str(get_total_row(pic_index[0])))\n\n    ########################################\n    \"\"\"sorting this picture's patches with order of name\"\"\"\n\n    def sort_picture_patches(list):\n        return natsorted(list)\n\n    #####################################\n\n    \"\"\"doing global merging\"\"\"\n\n    for a in range(len(pic_index)):\n        max_col = get_total_column(pic_index[a])\n        max_row = get_total_row(pic_index[a])\n        pic_index[a] = sort_picture_patches(pic_index[a])\n        col_index = {}\n        for elem in range(max_col + 1):\n            col_index[elem] = []\n\n        for j in range(len(pic_index[a])):\n            for k in range(max_col):\n                if int(pic_index[a][j].split(\"/\")[-1].split(\",\")[0].split(\"_\")[1]) == k:\n                    col_index[k].append(pic_index[a][j])\n\n        saver = []\n\n        for i in range(max_col - 1):\n            flag = concat_img_vertical(col_index[i])\n            saver.append(flag)\n\n        res = concat_temp_horizon(saver)\n        img = 
PIL.Image.open(res).convert(\"L\")\n        arr = np.array(img)\n    return arr\n\ndef mass_patch_merge_to_one_from_folder(patch_dir,save_dir):\n    '''\n    After implementing the merging patches back to a whole image,\n    we can also do that same thing to a folder that contains several patches that\n    come from different images and merge and save them back to those original images (with partially\n    blurry) based on the naming habit of slicing images.\n    :param patch_dir: [string] directory to the folder that contains all patches(of an image)\n    :param save_dir: [string] directory to the folder that used to save all those merged images\n    :return: This time there's no return\n    '''\n    patch_dir = patch_dir\n    result_dir = save_dir\n\n    pi_imgs = []\n    valid_images = [\".jpg\"]\n    for f in os.listdir(patch_dir):\n        ext = os.path.splitext(f)[1]\n        if ext.lower() not in valid_images:\n            continue\n        pi_imgs.append(Image.open(os.path.join(patch_dir, f)))\n    total = len(pi_imgs)\n\n    print(str(len(pi_imgs)) + \" patches in total\")\n\n    #######################################################\n    \"\"\"Loading all the images from directory\"\"\"\n    dir = []\n    valid_images = [\".jpg\"]\n    for f in os.listdir(patch_dir):\n        ext = os.path.splitext(f)[1]\n        if ext.lower() not in valid_images:\n            continue\n        dir.append(os.path.join(patch_dir, f))\n\n\n    #####################################################\n    \"\"\"concat images with numpy\"\"\"\n\n    def concat_img_horizon(list_imgs):\n        imgs = [PIL.Image.open(i) for i in list_imgs]\n        # pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)\n        min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]\n        #\n        imgs_comb = np.hstack((np.asarray(i.resize(min_shape)) for i in imgs))\n        imgs_comb = PIL.Image.fromarray(imgs_comb)\n       
 return imgs_comb\n\n    def concat_img_vertical(list_imgs):\n        imgs = [PIL.Image.open(i) for i in list_imgs]\n        # pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)\n        min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]\n        imgs_comb = np.hstack((np.asarray(i.resize(min_shape)) for i in imgs))\n        # for a vertical stacking it is simple: use vstack\n        imgs_comb = np.vstack((np.asarray(i.resize(min_shape)) for i in imgs))\n        imgs_comb = PIL.Image.fromarray(imgs_comb)\n        return imgs_comb\n\n    def concat_temp_horizon(list_imgs):\n        imgs_comb = np.hstack((np.asarray(i) for i in list_imgs))\n        imgs_comb = PIL.Image.fromarray(imgs_comb)\n        return imgs_comb\n\n    #########################################################\n\n\n    \"\"\"counting number of whole pictures in the folder\"\"\"\n    max_index = 0\n    for i in range(len(dir)):\n        flag = int(dir[i].split(\"/\")[-1].split(\",\")[0].split(\"_\")[0])\n        if flag > max_index:\n            max_index = flag\n\n    #############################################\n    \"\"\"placing patches to their certain picture\"\"\"\n    pic_index = {}\n    for elem in range(max_index + 1):\n        pic_index[elem] = []\n\n    for j in range(len(dir)):\n        for k in range(len(pic_index)):\n            if int(dir[j].split(\"/\")[-1].split(\",\")[0].split(\"_\")[0]) == k:\n                pic_index[k].append(dir[j])\n\n    print('the first picture contains ' + str(len(pic_index[0])) + ' patches')\n\n    ##########################################\n    \"\"\"getting the total columns of picture\"\"\"\n\n    def get_total_column(list):\n        max_col = 0\n        for l in range(len(list)):\n            col_flag = int(pic_index[0][l].split(\"/\")[-1].split(\",\")[0].split(\"_\")[1])\n            if col_flag > max_col:\n                max_col = col_flag\n        return max_col\n\n    
print (\"this picture's total column is \" + str(get_total_column(pic_index[0])))\n    #########################################\n    \"\"\"getting the total rows of picture\"\"\"\n\n    def get_total_row(list):\n        max_row = 0\n        for o in range(len(list)):\n            row_flag = int(list[o].split(\"/\")[-1].split(\",\")[0].split(\"_\")[2])\n            if row_flag > max_row:\n                max_row = row_flag\n        return max_row\n\n    print (\"this picture's total row is \" + str(get_total_row(pic_index[0])))\n\n    ########################################\n    \"\"\"sorting this picture's patches with order of name\"\"\"\n\n    def sort_picture_patches(list):\n        return natsorted(list)\n\n    #####################################\n\n    \"\"\"doing global merging\"\"\"\n\n    for a in range(len(pic_index)):\n        max_col = get_total_column(pic_index[a])\n        max_row = get_total_row(pic_index[a])\n        pic_index[a] = sort_picture_patches(pic_index[a])\n        col_index = {}\n        for elem in range(max_col + 1):\n            col_index[elem] = []\n\n        for j in range(len(pic_index[a])):\n            for k in range(max_col):\n                if int(pic_index[a][j].split(\"/\")[-1].split(\",\")[0].split(\"_\")[1]) == k:\n                    col_index[k].append(pic_index[a][j])\n\n        saver = []\n\n        for i in range(max_col - 1):\n            flag = concat_img_vertical(col_index[i])\n            saver.append(flag)\n\n        res = concat_temp_horizon(saver)\n        res.save(result_dir + str(a) + '.jpg')\n\n\n\ndef image_to_patch(image_path, patch_size, patch_dir):\n    '''\n    cut an image into patch with certain size\n    !!! Sibo, please complete this function !!!\n\n    :param image_path: [string] path to the image(e.g. ./whole_image.jpg)\n    :param patch_size: [tuple] i.g. 
(30(length),30(witch))\n    :param patch_dir: [string] dir where to save the patch dir.(patch dir is defined to be the folder that contains all\n    image patches of an image)\n    :return:\n    '''\n    img = Image.open(image_path)\n    (imageWidth, imageHeight) = img.size\n    gridx = patch_size\n    gridy = patch_size\n    rangex = img.width / gridx\n    rangey = img.height / gridy\n    print rangex * rangey\n    for x in xrange(rangex):\n        for y in xrange(rangey):\n            bbox = (x * gridx, y * gridy, x * gridx + gridx, y * gridy + gridy)\n            slice_bit = img.crop(bbox)\n            slice_bit.save(patch_dir + str(x) + '_' + str(y) + '.jpg', optimize=True,\n                           bits=6)\n            print(patch_dir + str(x) + '_' + str(y) + '.jpg')\n    print(imageWidth)\n\n\ndef directory_to_patch(patch_size,original_path,no_blur_path,blur_path,all_img_path):\n    '''\n    We take a directory that contains several whole pictures and cut then into custom size of patches,\n    then apply 50% chance blur and non-blur to those patches, save them into blurry folder, non-blurry folder,\n    and a folder that contains all blurry and non-blurry patches with order.\n    :param patch_size: [integer] The custom patch size we want, e.g:for 30x30 patch, enter '30'\n    :param original_path: [string] The original path that contains all the original pictures without any modification\n    :param no_blur_path: [string] The destination path that contains all the non-blurry patches with order\n    :param blur_path: [string] The destination path that contains all the blurry patches with order\n    :param all_img_path: [string] The destination path that contains all the patches with order\n    :return: There's no return in this function, all the modified patches are saved into the destination path\n    '''\n    #motion blur preset\n    size = 15\n    gridx = patch_size\n    gridy = patch_size\n    kernel_motion_blur = np.zeros((size, size))\n    
kernel_motion_blur[int((size - 1) / 2), :] = np.ones(size)\n    kernel_motion_blur = kernel_motion_blur / size\n\n    # go through every image in source folder\n    print('begin loading images')\n    pi_imgs = []\n    cv_imgs = []\n    valid_images = [\".jpg\"]\n    for f in os.listdir(original_path):\n        ext = os.path.splitext(f)[1]\n        if ext.lower() not in valid_images:\n            continue\n        pi_imgs.append(Image.open(os.path.join(original_path, f)))\n        cv_imgs.append(cv2.imread(os.path.join(original_path, f)))\n    print('finished loading images')\n    #\n\n    # looping to create blurry and non-blurry images in 50% chance\n    for i in range(len(pi_imgs)):\n        img = pi_imgs[i]\n        (imageWidth, imageHeight) = img.size\n\n        rangex = imageWidth / gridx\n        rangey = imageHeight / gridy\n        for x in xrange(rangex):\n            for y in xrange(rangey):\n\n                bbox = (x * gridx, y * gridy, x * gridx + gridx, y * gridy + gridy)\n                slice_bit = img.crop(bbox)\n                if random.randrange(2) == 0:\n                    slice_bit.save(no_blur_path + str(i) + '_' + str(x) + '_' + str(y) + ',noblur.jpg', optimize=True,\n                                   bits=6)\n                    slice_bit.save(all_img_path + str(i) + '_' + str(x) + '_' + str(y) + ',noblur.jpg', optimize=True,\n                                   bits=6)\n                    print(str(i))\n                else:\n                    slice_bit.save(blur_path + str(i) + '_' + str(x) + '_' + str(y) + ',blur.jpg', optimize=True, bits=6)\n                    img1 = cv2.imread(blur_path + str(i) + '_' + str(x) + '_' + str(y) + ',blur.jpg')\n                    output = cv2.filter2D(img1, -1, kernel_motion_blur)\n                    cv2.imwrite(blur_path + str(i) + '_' + str(x) + '_' + str(y) + ',blur.jpg', output)\n                    cv2.imwrite(all_img_path + str(i) + '_' + str(x) + '_' + str(y) + ',blur.jpg', output)\n         
           print(str(i))\n\n"
  },
  {
    "path": "README.md",
    "content": "# MotionBlur-detection-by-CNN\n\n\n```\nTo run the cnn model, just enter \"cnn.py\" and run the code. It might take \na couple of hours to run it if you are using personal computers. Using a server \nor GPU to run this code would significantly lower the running time.\n```\n## Abstract\n\n```\nOur project aims to detect motion blur from a single, blurry image. We propose\na deep learning approach to predict the probabilistic distribution of motion blur at\nthe patch level using a Convolutional Neural Network (CNN).\n```\n## 1 Our approach\n\n```\nWe approached the problem by slicing 100 images into 30x30 patches, and applied our own motion\nblur algorithm to them (with a random rate of 50%). We then labeled the blurry and non-blurry\npatches with 0s and 1s (0 for still, 1 for blurry), and loaded the modified images in as our training\ndata.\n```\n```\n1.1 Generating the training data\n```\nWe generated the training data using images from the Pascal Visual Object Classes Challenge 2010\n(VOC2010) data set. Our work was done in Python using the PIL, numpy, opencv, and os libraries.\n\n```\nOnce we had the original images from Pascal, we had to modify them to fit our needs. We needed to\nhave 100 images, each partially blurred and with a corresponding matrix indicating which part of the\nimage is blurred.\nWe achieved this by:\n```\n1. Making a blurred copy of the original image.\n2. Cutting both images (original and blurry) into 30 × 30 patches.\n3. Creating a 2D List in Python of size 30 × 30, to represent each image patch. We initialize\n    each element to 0 (to represent non-blurry).\n4. Picking half the patches from the list and marking them as 1 (to represent blurry).\n5. Putting the final image together to get a partially-blurred, qualifying image (and its corre-\n    sponding matrix).\n6. 
Saving the image as \"n.jpg\" (where n is the serial number of the image), and adding the\n    matrix to a list (to form a 3D ’list of lists’) containing the matrices of all the images.\n\n\n\n![Image](https://github.com/Sibozhu/MotionBlur-detection-by-CNN/blob/master/Report/images/blur.png?raw=true)\n```\nImage of the original-to-blur process.\n```\n![Image](https://github.com/Sibozhu/MotionBlur-detection-by-CNN/blob/master/Report/images/patches.png?raw=true)\n\n```\nImage of the image splitting process.\n```\n\n![Image](https://github.com/Sibozhu/MotionBlur-detection-by-CNN/blob/master/Report/images/matrix.png?raw=true)\n\n```\nThe final image with its corresponding matrix.\n```\n\nWe repeat the above for all 100 images, until we end up with a folder containing partially-blurred\nimages \"0.jpg\" through \"100.jpg\", and a 3D list (named \"labels\") that contains 100 matrices. This\nlets us access the matrix for image \"31.jpg\", for example, by querying for \"labels[31]\".\n\n## 2 Learning the Convolutional Neural Network (CNN)\n\nOnce we had the prepared images, we loaded them into our training set. We ran into a prob-\nlem loading the images into a numpy array, where our images were of the form (30,30,3), while\nthe Keras.Conv2D layer required input to be of the form (3,30,30). We solved this by using the\nnumpy.swapaxes() function to alter the images’ shape in order to fit the convolutional layer.\n\nWe then apply the CNN learning model. First, we apply a Convolution2D layer with 7 × 7 filters,\nfollowed by a ReLU function. The Conv layer’s parameters consist of a set of learnable filters. 
Each\nfilter is small spatially, but extends through the depth of the input volume.\n\nDuring the forward pass, we slide each filter across the width and height of the input volume and\ncompute the dot products between the entries of the filter and the input at any position. ReLU is the\nrectifier function - an activation function that can be used by neurons, just like any other activation\nfunction. A node using the rectifier activation function is called a ReLU node. ReLU sets all negative\nvalues in the matrix x to 0, and all other values are kept constant. ReLU is computed after the\nconvolution, and is thus a nonlinear activation function (like tanh or sigmoid).\n\nAfter that, we add a MaxPooling2D layer with a pool size of 2 × 2. MaxPooling is a sample-\nbased discretization process. The objective is to down-sample an input representation, reducing\nits dimensionality and allowing for assumptions to be made about features contained in the binned\nsub-regions.\n\nWe then add a Dropout layer with dropout rate of 0.2, which makes our learning process faster.\nDropout randomly ignoring nodes is useful in CNN models because it prevents interdependencies\nfrom emerging between nodes. This allows the network to learn more and form a more robust\nrelationship. We then do the ’Conv2D, ReLU, MaxPooling2D, Dropout’ cycle again. Finally, we add\na fully-connected layer with ReLU, and then softmax the result. Softmax is a classifier at the end of\nthe neural network — a logistic regression to regularize outputs to a value between 0 and 1.\n\nWe set our model’s learning rate to be 0.01. This might generally be too big, but we made this\ndecision for the sake of brevity - it was the fastest way to show a result. We chose a batch size of 126\n(because we had large training data). We also chose Adam as our optimizer as it’s the most efficient\noptimizer for our model.\n\nAfter training with 100 epochs, we had testing accuracy of 92%, which is a very optimal rate for\nour model. 
Our training model is saved in an HDF5 file, \"motionblur.h5\".\n\n![Image](https://github.com/Sibozhu/MotionBlur-detection-by-CNN/blob/master/Report/images/accuracy.jpeg?raw=true)\n\n\n## 3 Conclusion\n\nIn this report, we have proposed a novel CNN-based motion blur detection approach. We learn an\neffective CNN for estimating motion blur from local patches. In the future, we are interested in\ndesigning a CNN for estimating motion kernels. We are also interested in designing a CNN non-uniform\nmotion deblurring method.\n\n\n## Acknowledgement\n\nThis report has been prepared for the Boston University Machine Learning course (CS 542), taken\nover the Summer 2, 2017 semester by the listed authors. It is intended to be used in compliance with\nthe requirements of the course.\n\n## References\n\n[1] Jian Sun, Wenfei Cao, Zongben Xu, Jean Ponce. Learning a convolutional neural network for non-uniform\nmotion blur removal. CVPR 2015 - IEEE Conference on Computer Vision and Pattern Recognition 2015, Jun\n2015, Boston, United States. IEEE, 2015.\n\n[2] “Visual Object Classes Challenge 2010 (VOC2010).” The PASCAL Visual Object Classes Challenge 2010\n(VOC2010), PASCAL, 2010, host.robots.ox.ac.uk/pascal/VOC/voc2010/.\n\n\n\n\n\n\n"
  },
  {
    "path": "Report/nicefrac.sty",
    "content": "%%\n%% This is file `nicefrac.sty',\n%% generated with the docstrip utility.\n%%\n%% The original source files were:\n%%\n%% units.dtx  (with options: `nicefrac')\n%% \n%% LaTeX package for typesetting nice fractions\n%% \n%% Copyright (C) 1998 Axel Reichert\n%% See the files README and COPYING.\n%% \n%% \\CharacterTable\n%%  {Upper-case    \\A\\B\\C\\D\\E\\F\\G\\H\\I\\J\\K\\L\\M\\N\\O\\P\\Q\\R\\S\\T\\U\\V\\W\\X\\Y\\Z\n%%   Lower-case    \\a\\b\\c\\d\\e\\f\\g\\h\\i\\j\\k\\l\\m\\n\\o\\p\\q\\r\\s\\t\\u\\v\\w\\x\\y\\z\n%%   Digits        \\0\\1\\2\\3\\4\\5\\6\\7\\8\\9\n%%   Exclamation   \\!     Double quote  \\\"     Hash (number) \\#\n%%   Dollar        \\$     Percent       \\%     Ampersand     \\&\n%%   Acute accent  \\'     Left paren    \\(     Right paren   \\)\n%%   Asterisk      \\*     Plus          \\+     Comma         \\,\n%%   Minus         \\-     Point         \\.     Solidus       \\/\n%%   Colon         \\:     Semicolon     \\;     Less than     \\<\n%%   Equals        \\=     Greater than  \\>     Question mark \\?\n%%   Commercial at \\@     Left bracket  \\[     Backslash     \\\\\n%%   Right bracket \\]     Circumflex    \\^     Underscore    \\_\n%%   Grave accent  \\`     Left brace    \\{     Vertical bar  \\|\n%%   Right brace   \\}     Tilde         \\~}\n\\NeedsTeXFormat{LaTeX2e}[1995/12/01]\n\\ProvidesPackage{nicefrac}[1998/08/04 v0.9b Nice fractions]\n\\newlength{\\L@UnitsRaiseDisplaystyle}\n\\newlength{\\L@UnitsRaiseTextstyle}\n\\newlength{\\L@UnitsRaiseScriptstyle}\n\\RequirePackage{ifthen}\n\\DeclareRobustCommand*{\\@UnitsNiceFrac}[3][]{%\n  \\ifthenelse{\\boolean{mmode}}{%\n    \\settoheight{\\L@UnitsRaiseDisplaystyle}{%\n      \\ensuremath{\\displaystyle#1{M}}%\n    }%\n    \\settoheight{\\L@UnitsRaiseTextstyle}{%\n      \\ensuremath{\\textstyle#1{M}}%\n    }%\n    \\settoheight{\\L@UnitsRaiseScriptstyle}{%\n      \\ensuremath{\\scriptstyle#1{M}}%\n    }%\n    \\settoheight{\\@tempdima}{%\n      
\\ensuremath{\\scriptscriptstyle#1{M}}%\n    }%\n    \\addtolength{\\L@UnitsRaiseDisplaystyle}{%\n      -\\L@UnitsRaiseScriptstyle%\n    }%\n    \\addtolength{\\L@UnitsRaiseTextstyle}{%\n      -\\L@UnitsRaiseScriptstyle%\n    }%\n    \\addtolength{\\L@UnitsRaiseScriptstyle}{-\\@tempdima}%\n    \\mathchoice\n      {%\n        \\raisebox{\\L@UnitsRaiseDisplaystyle}{%\n          \\ensuremath{\\scriptstyle#1{#2}}%\n        }%\n      }%\n      {%\n        \\raisebox{\\L@UnitsRaiseTextstyle}{%\n          \\ensuremath{\\scriptstyle#1{#2}}%\n        }%\n      }%\n      {%\n        \\raisebox{\\L@UnitsRaiseScriptstyle}{%\n          \\ensuremath{\\scriptscriptstyle#1{#2}}%\n        }%\n      }%\n      {%\n        \\raisebox{\\L@UnitsRaiseScriptstyle}{%\n          \\ensuremath{\\scriptscriptstyle#1{#2}}%\n        }%\n      }%\n    \\mkern-2mu/\\mkern-1mu%\n    \\bgroup\n      \\mathchoice\n        {\\scriptstyle}%\n        {\\scriptstyle}%\n        {\\scriptscriptstyle}%\n        {\\scriptscriptstyle}%\n      #1{#3}%\n    \\egroup\n  }%\n  {%\n    \\settoheight{\\L@UnitsRaiseTextstyle}{#1{M}}%\n    \\settoheight{\\@tempdima}{%\n      \\ensuremath{%\n        \\mbox{\\fontsize\\sf@size\\z@\\selectfont#1{M}}%\n      }%\n    }%\n    \\addtolength{\\L@UnitsRaiseTextstyle}{-\\@tempdima}%\n    \\raisebox{\\L@UnitsRaiseTextstyle}{%\n      \\ensuremath{%\n        \\mbox{\\fontsize\\sf@size\\z@\\selectfont#1{#2}}%\n      }%\n    }%\n    \\ensuremath{\\mkern-2mu}/\\ensuremath{\\mkern-1mu}%\n    \\ensuremath{%\n      \\mbox{\\fontsize\\sf@size\\z@\\selectfont#1{#3}}%\n    }%\n  }%\n}\n\\DeclareRobustCommand*{\\@UnitsUglyFrac}[3][]{%\n  \\ifthenelse{\\boolean{mmode}}{%\n    \\frac{#1{#2}}{#1{#3}}%\n  }%\n  {%\n    #1{#2}/#1{#3}%\n    \\PackageWarning{nicefrac}{%\n      You used \\protect\\nicefrac\\space or\n      \\protect\\unitfrac\\space in text mode\\MessageBreak\n      and specified the ``ugly'' option.\\MessageBreak\n      The fraction may be ambiguous or wrong.\\MessageBreak\n      
Please make sure the denominator is\n      correct.\\MessageBreak\n      If it is, you can safely ignore\\MessageBreak\n      this warning\n    }%\n  }%\n}\n\\DeclareOption{nice}{%\n  \\DeclareRobustCommand*{\\nicefrac}{\\@UnitsNiceFrac}%\n}\n\\DeclareOption{ugly}{%\n  \\DeclareRobustCommand*{\\nicefrac}{\\@UnitsUglyFrac}%\n}\n\\ExecuteOptions{nice}\n\\ProcessOptions*\n\\endinput\n%%\n%% End of file `nicefrac.sty'.\n"
  },
  {
    "path": "Report/nips_2017.aux",
    "content": "\\relax \n\\providecommand\\hyper@newdestlabel[2]{}\n\\providecommand\\HyperFirstAtBeginDocument{\\AtBeginDocument}\n\\HyperFirstAtBeginDocument{\\ifx\\hyper@anchor\\@undefined\n\\global\\let\\oldcontentsline\\contentsline\n\\gdef\\contentsline#1#2#3#4{\\oldcontentsline{#1}{#2}{#3}}\n\\global\\let\\oldnewlabel\\newlabel\n\\gdef\\newlabel#1#2{\\newlabelxx{#1}#2}\n\\gdef\\newlabelxx#1#2#3#4#5#6{\\oldnewlabel{#1}{{#2}{#3}}}\n\\AtEndDocument{\\ifx\\hyper@anchor\\@undefined\n\\let\\contentsline\\oldcontentsline\n\\let\\newlabel\\oldnewlabel\n\\fi}\n\\fi}\n\\global\\let\\hyper@last\\relax \n\\gdef\\HyperFirstAtBeginDocument#1{#1}\n\\providecommand\\HyField@AuxAddToFields[1]{}\n\\providecommand\\HyField@AuxAddToCoFields[2]{}\n\\@writefile{toc}{\\contentsline {section}{\\numberline {1}Our approach}{1}{section.1}}\n\\@writefile{toc}{\\contentsline {subsection}{\\numberline {1.1}Generating the training data}{1}{subsection.1.1}}\n\\@writefile{toc}{\\contentsline {section}{\\numberline {2}Learning the Convolutional Neural Network (CNN)}{3}{section.2}}\n\\@writefile{toc}{\\contentsline {paragraph}{We then apply the CNN learning model.}{3}{section*.1}}\n\\@writefile{toc}{\\contentsline {paragraph}{During the forward pass,}{3}{section*.2}}\n\\@writefile{toc}{\\contentsline {paragraph}{We set our model's learning rate to be $0.01$.}{3}{section*.3}}\n\\@writefile{toc}{\\contentsline {section}{\\numberline {3}Conclusion}{3}{section.3}}\n"
  },
  {
    "path": "Report/nips_2017.log",
    "content": "This is pdfTeX, Version 3.14159265-2.6-1.40.17 (TeX Live 2016/Debian) (preloaded format=pdflatex 2017.7.8)  12 AUG 2017 15:30\nentering extended mode\n restricted \\write18 enabled.\n %&-line parsing enabled.\n**nips_2017.tex\n(./nips_2017.tex\nLaTeX2e <2017/01/01> patch level 3\nBabel <3.9r> and hyphenation patterns for 3 language(s) loaded.\n(/usr/share/texlive/texmf-dist/tex/latex/base/article.cls\nDocument Class: article 2014/09/29 v1.4h Standard LaTeX document class\n(/usr/share/texlive/texmf-dist/tex/latex/base/size10.clo\nFile: size10.clo 2014/09/29 v1.4h Standard LaTeX file (size option)\n)\n\\c@part=\\count79\n\\c@section=\\count80\n\\c@subsection=\\count81\n\\c@subsubsection=\\count82\n\\c@paragraph=\\count83\n\\c@subparagraph=\\count84\n\\c@figure=\\count85\n\\c@table=\\count86\n\\abovecaptionskip=\\skip41\n\\belowcaptionskip=\\skip42\n\\bibindent=\\dimen102\n) (./nips_2017.sty\nPackage: nips_2017 2017/03/20 NIPS 2017 submission/camera-ready style file\n\n(/usr/share/texlive/texmf-dist/tex/latex/natbib/natbib.sty\nPackage: natbib 2010/09/13 8.31b (PWD, AO)\n\\bibhang=\\skip43\n\\bibsep=\\skip44\nLaTeX Info: Redefining \\cite on input line 694.\n\\c@NAT@ctr=\\count87\n)\n(/usr/share/texlive/texmf-dist/tex/latex/geometry/geometry.sty\nPackage: geometry 2010/09/12 v5.6 Page Geometry\n\n(/usr/share/texlive/texmf-dist/tex/latex/graphics/keyval.sty\nPackage: keyval 2014/10/28 v1.15 key=value parser (DPC)\n\\KV@toks@=\\toks14\n)\n(/usr/share/texlive/texmf-dist/tex/generic/oberdiek/ifpdf.sty\nPackage: ifpdf 2016/05/14 v3.1 Provides the ifpdf switch\n)\n(/usr/share/texlive/texmf-dist/tex/generic/oberdiek/ifvtex.sty\nPackage: ifvtex 2016/05/16 v1.6 Detect VTeX and its facilities (HO)\nPackage ifvtex Info: VTeX not detected.\n)\n(/usr/share/texlive/texmf-dist/tex/generic/ifxetex/ifxetex.sty\nPackage: ifxetex 2010/09/12 v0.6 Provides ifxetex 
conditional\n)\n\\Gm@cnth=\\count88\n\\Gm@cntv=\\count89\n\\c@Gm@tempcnt=\\count90\n\\Gm@bindingoffset=\\dimen103\n\\Gm@wd@mp=\\dimen104\n\\Gm@odd@mp=\\dimen105\n\\Gm@even@mp=\\dimen106\n\\Gm@layoutwidth=\\dimen107\n\\Gm@layoutheight=\\dimen108\n\\Gm@layouthoffset=\\dimen109\n\\Gm@layoutvoffset=\\dimen110\n\\Gm@dimlist=\\toks15\n)\n\\@nipsabovecaptionskip=\\skip45\n\\@nipsbelowcaptionskip=\\skip46\n)\n(/usr/share/texlive/texmf-dist/tex/latex/base/inputenc.sty\nPackage: inputenc 2015/03/17 v1.2c Input encoding file\n\\inpenc@prehook=\\toks16\n\\inpenc@posthook=\\toks17\n\n(/usr/share/texlive/texmf-dist/tex/latex/base/utf8.def\nFile: utf8.def 2017/01/28 v1.1t UTF-8 support for inputenc\nNow handling font encoding OML ...\n... no UTF-8 mapping file for font encoding OML\nNow handling font encoding T1 ...\n... processing UTF-8 mapping file for font encoding T1\n\n(/usr/share/texlive/texmf-dist/tex/latex/base/t1enc.dfu\nFile: t1enc.dfu 2017/01/28 v1.1t UTF-8 support for inputenc\n   defining Unicode char U+00A0 (decimal 160)\n   defining Unicode char U+00A1 (decimal 161)\n   defining Unicode char U+00A3 (decimal 163)\n   defining Unicode char U+00AB (decimal 171)\n   defining Unicode char U+00AD (decimal 173)\n   defining Unicode char U+00BB (decimal 187)\n   defining Unicode char U+00BF (decimal 191)\n   defining Unicode char U+00C0 (decimal 192)\n   defining Unicode char U+00C1 (decimal 193)\n   defining Unicode char U+00C2 (decimal 194)\n   defining Unicode char U+00C3 (decimal 195)\n   defining Unicode char U+00C4 (decimal 196)\n   defining Unicode char U+00C5 (decimal 197)\n   defining Unicode char U+00C6 (decimal 198)\n   defining Unicode char U+00C7 (decimal 199)\n   defining Unicode char U+00C8 (decimal 200)\n   defining Unicode char U+00C9 (decimal 201)\n   defining Unicode char U+00CA (decimal 202)\n   defining Unicode char U+00CB (decimal 203)\n   defining Unicode char U+00CC (decimal 204)\n   defining Unicode char U+00CD (decimal 205)\n   defining Unicode 
char U+00CE (decimal 206)\n   defining Unicode char U+00CF (decimal 207)\n   defining Unicode char U+00D0 (decimal 208)\n   defining Unicode char U+00D1 (decimal 209)\n   defining Unicode char U+00D2 (decimal 210)\n   defining Unicode char U+00D3 (decimal 211)\n   defining Unicode char U+00D4 (decimal 212)\n   defining Unicode char U+00D5 (decimal 213)\n   defining Unicode char U+00D6 (decimal 214)\n   defining Unicode char U+00D8 (decimal 216)\n   defining Unicode char U+00D9 (decimal 217)\n   defining Unicode char U+00DA (decimal 218)\n   defining Unicode char U+00DB (decimal 219)\n   defining Unicode char U+00DC (decimal 220)\n   defining Unicode char U+00DD (decimal 221)\n   defining Unicode char U+00DE (decimal 222)\n   defining Unicode char U+00DF (decimal 223)\n   defining Unicode char U+00E0 (decimal 224)\n   defining Unicode char U+00E1 (decimal 225)\n   defining Unicode char U+00E2 (decimal 226)\n   defining Unicode char U+00E3 (decimal 227)\n   defining Unicode char U+00E4 (decimal 228)\n   defining Unicode char U+00E5 (decimal 229)\n   defining Unicode char U+00E6 (decimal 230)\n   defining Unicode char U+00E7 (decimal 231)\n   defining Unicode char U+00E8 (decimal 232)\n   defining Unicode char U+00E9 (decimal 233)\n   defining Unicode char U+00EA (decimal 234)\n   defining Unicode char U+00EB (decimal 235)\n   defining Unicode char U+00EC (decimal 236)\n   defining Unicode char U+00ED (decimal 237)\n   defining Unicode char U+00EE (decimal 238)\n   defining Unicode char U+00EF (decimal 239)\n   defining Unicode char U+00F0 (decimal 240)\n   defining Unicode char U+00F1 (decimal 241)\n   defining Unicode char U+00F2 (decimal 242)\n   defining Unicode char U+00F3 (decimal 243)\n   defining Unicode char U+00F4 (decimal 244)\n   defining Unicode char U+00F5 (decimal 245)\n   defining Unicode char U+00F6 (decimal 246)\n   defining Unicode char U+00F8 (decimal 248)\n   defining Unicode char U+00F9 (decimal 249)\n   defining Unicode char U+00FA (decimal 
250)\n   defining Unicode char U+00FB (decimal 251)\n   defining Unicode char U+00FC (decimal 252)\n   defining Unicode char U+00FD (decimal 253)\n   defining Unicode char U+00FE (decimal 254)\n   defining Unicode char U+00FF (decimal 255)\n   defining Unicode char U+0100 (decimal 256)\n   defining Unicode char U+0101 (decimal 257)\n   defining Unicode char U+0102 (decimal 258)\n   defining Unicode char U+0103 (decimal 259)\n   defining Unicode char U+0104 (decimal 260)\n   defining Unicode char U+0105 (decimal 261)\n   defining Unicode char U+0106 (decimal 262)\n   defining Unicode char U+0107 (decimal 263)\n   defining Unicode char U+0108 (decimal 264)\n   defining Unicode char U+0109 (decimal 265)\n   defining Unicode char U+010A (decimal 266)\n   defining Unicode char U+010B (decimal 267)\n   defining Unicode char U+010C (decimal 268)\n   defining Unicode char U+010D (decimal 269)\n   defining Unicode char U+010E (decimal 270)\n   defining Unicode char U+010F (decimal 271)\n   defining Unicode char U+0110 (decimal 272)\n   defining Unicode char U+0111 (decimal 273)\n   defining Unicode char U+0112 (decimal 274)\n   defining Unicode char U+0113 (decimal 275)\n   defining Unicode char U+0114 (decimal 276)\n   defining Unicode char U+0115 (decimal 277)\n   defining Unicode char U+0116 (decimal 278)\n   defining Unicode char U+0117 (decimal 279)\n   defining Unicode char U+0118 (decimal 280)\n   defining Unicode char U+0119 (decimal 281)\n   defining Unicode char U+011A (decimal 282)\n   defining Unicode char U+011B (decimal 283)\n   defining Unicode char U+011C (decimal 284)\n   defining Unicode char U+011D (decimal 285)\n   defining Unicode char U+011E (decimal 286)\n   defining Unicode char U+011F (decimal 287)\n   defining Unicode char U+0120 (decimal 288)\n   defining Unicode char U+0121 (decimal 289)\n   defining Unicode char U+0122 (decimal 290)\n   defining Unicode char U+0123 (decimal 291)\n   defining Unicode char U+0124 (decimal 292)\n   defining Unicode 
char U+0125 (decimal 293)\n   defining Unicode char U+0128 (decimal 296)\n   defining Unicode char U+0129 (decimal 297)\n   defining Unicode char U+012A (decimal 298)\n   defining Unicode char U+012B (decimal 299)\n   defining Unicode char U+012C (decimal 300)\n   defining Unicode char U+012D (decimal 301)\n   defining Unicode char U+012E (decimal 302)\n   defining Unicode char U+012F (decimal 303)\n   defining Unicode char U+0130 (decimal 304)\n   defining Unicode char U+0131 (decimal 305)\n   defining Unicode char U+0132 (decimal 306)\n   defining Unicode char U+0133 (decimal 307)\n   defining Unicode char U+0134 (decimal 308)\n   defining Unicode char U+0135 (decimal 309)\n   defining Unicode char U+0136 (decimal 310)\n   defining Unicode char U+0137 (decimal 311)\n   defining Unicode char U+0139 (decimal 313)\n   defining Unicode char U+013A (decimal 314)\n   defining Unicode char U+013B (decimal 315)\n   defining Unicode char U+013C (decimal 316)\n   defining Unicode char U+013D (decimal 317)\n   defining Unicode char U+013E (decimal 318)\n   defining Unicode char U+0141 (decimal 321)\n   defining Unicode char U+0142 (decimal 322)\n   defining Unicode char U+0143 (decimal 323)\n   defining Unicode char U+0144 (decimal 324)\n   defining Unicode char U+0145 (decimal 325)\n   defining Unicode char U+0146 (decimal 326)\n   defining Unicode char U+0147 (decimal 327)\n   defining Unicode char U+0148 (decimal 328)\n   defining Unicode char U+014A (decimal 330)\n   defining Unicode char U+014B (decimal 331)\n   defining Unicode char U+014C (decimal 332)\n   defining Unicode char U+014D (decimal 333)\n   defining Unicode char U+014E (decimal 334)\n   defining Unicode char U+014F (decimal 335)\n   defining Unicode char U+0150 (decimal 336)\n   defining Unicode char U+0151 (decimal 337)\n   defining Unicode char U+0152 (decimal 338)\n   defining Unicode char U+0153 (decimal 339)\n   defining Unicode char U+0154 (decimal 340)\n   defining Unicode char U+0155 (decimal 
341)\n   defining Unicode char U+0156 (decimal 342)\n   defining Unicode char U+0157 (decimal 343)\n   defining Unicode char U+0158 (decimal 344)\n   defining Unicode char U+0159 (decimal 345)\n   defining Unicode char U+015A (decimal 346)\n   defining Unicode char U+015B (decimal 347)\n   defining Unicode char U+015C (decimal 348)\n   defining Unicode char U+015D (decimal 349)\n   defining Unicode char U+015E (decimal 350)\n   defining Unicode char U+015F (decimal 351)\n   defining Unicode char U+0160 (decimal 352)\n   defining Unicode char U+0161 (decimal 353)\n   defining Unicode char U+0162 (decimal 354)\n   defining Unicode char U+0163 (decimal 355)\n   defining Unicode char U+0164 (decimal 356)\n   defining Unicode char U+0165 (decimal 357)\n   defining Unicode char U+0168 (decimal 360)\n   defining Unicode char U+0169 (decimal 361)\n   defining Unicode char U+016A (decimal 362)\n   defining Unicode char U+016B (decimal 363)\n   defining Unicode char U+016C (decimal 364)\n   defining Unicode char U+016D (decimal 365)\n   defining Unicode char U+016E (decimal 366)\n   defining Unicode char U+016F (decimal 367)\n   defining Unicode char U+0170 (decimal 368)\n   defining Unicode char U+0171 (decimal 369)\n   defining Unicode char U+0172 (decimal 370)\n   defining Unicode char U+0173 (decimal 371)\n   defining Unicode char U+0174 (decimal 372)\n   defining Unicode char U+0175 (decimal 373)\n   defining Unicode char U+0176 (decimal 374)\n   defining Unicode char U+0177 (decimal 375)\n   defining Unicode char U+0178 (decimal 376)\n   defining Unicode char U+0179 (decimal 377)\n   defining Unicode char U+017A (decimal 378)\n   defining Unicode char U+017B (decimal 379)\n   defining Unicode char U+017C (decimal 380)\n   defining Unicode char U+017D (decimal 381)\n   defining Unicode char U+017E (decimal 382)\n   defining Unicode char U+01CD (decimal 461)\n   defining Unicode char U+01CE (decimal 462)\n   defining Unicode char U+01CF (decimal 463)\n   defining Unicode 
char U+01D0 (decimal 464)\n   defining Unicode char U+01D1 (decimal 465)\n   defining Unicode char U+01D2 (decimal 466)\n   defining Unicode char U+01D3 (decimal 467)\n   defining Unicode char U+01D4 (decimal 468)\n   defining Unicode char U+01E2 (decimal 482)\n   defining Unicode char U+01E3 (decimal 483)\n   defining Unicode char U+01E6 (decimal 486)\n   defining Unicode char U+01E7 (decimal 487)\n   defining Unicode char U+01E8 (decimal 488)\n   defining Unicode char U+01E9 (decimal 489)\n   defining Unicode char U+01EA (decimal 490)\n   defining Unicode char U+01EB (decimal 491)\n   defining Unicode char U+01F0 (decimal 496)\n   defining Unicode char U+01F4 (decimal 500)\n   defining Unicode char U+01F5 (decimal 501)\n   defining Unicode char U+0218 (decimal 536)\n   defining Unicode char U+0219 (decimal 537)\n   defining Unicode char U+021A (decimal 538)\n   defining Unicode char U+021B (decimal 539)\n   defining Unicode char U+0232 (decimal 562)\n   defining Unicode char U+0233 (decimal 563)\n   defining Unicode char U+1E02 (decimal 7682)\n   defining Unicode char U+1E03 (decimal 7683)\n   defining Unicode char U+200C (decimal 8204)\n   defining Unicode char U+2010 (decimal 8208)\n   defining Unicode char U+2011 (decimal 8209)\n   defining Unicode char U+2012 (decimal 8210)\n   defining Unicode char U+2013 (decimal 8211)\n   defining Unicode char U+2014 (decimal 8212)\n   defining Unicode char U+2015 (decimal 8213)\n   defining Unicode char U+2018 (decimal 8216)\n   defining Unicode char U+2019 (decimal 8217)\n   defining Unicode char U+201A (decimal 8218)\n   defining Unicode char U+201C (decimal 8220)\n   defining Unicode char U+201D (decimal 8221)\n   defining Unicode char U+201E (decimal 8222)\n   defining Unicode char U+2030 (decimal 8240)\n   defining Unicode char U+2031 (decimal 8241)\n   defining Unicode char U+2039 (decimal 8249)\n   defining Unicode char U+203A (decimal 8250)\n   defining Unicode char U+2423 (decimal 9251)\n   defining Unicode char 
U+1E20 (decimal 7712)\n   defining Unicode char U+1E21 (decimal 7713)\n)\nNow handling font encoding OT1 ...\n... processing UTF-8 mapping file for font encoding OT1\n\n(/usr/share/texlive/texmf-dist/tex/latex/base/ot1enc.dfu\nFile: ot1enc.dfu 2017/01/28 v1.1t UTF-8 support for inputenc\n   defining Unicode char U+00A0 (decimal 160)\n   defining Unicode char U+00A1 (decimal 161)\n   defining Unicode char U+00A3 (decimal 163)\n   defining Unicode char U+00AD (decimal 173)\n   defining Unicode char U+00B8 (decimal 184)\n   defining Unicode char U+00BF (decimal 191)\n   defining Unicode char U+00C5 (decimal 197)\n   defining Unicode char U+00C6 (decimal 198)\n   defining Unicode char U+00D8 (decimal 216)\n   defining Unicode char U+00DF (decimal 223)\n   defining Unicode char U+00E6 (decimal 230)\n   defining Unicode char U+00EC (decimal 236)\n   defining Unicode char U+00ED (decimal 237)\n   defining Unicode char U+00EE (decimal 238)\n   defining Unicode char U+00EF (decimal 239)\n   defining Unicode char U+00F8 (decimal 248)\n   defining Unicode char U+0131 (decimal 305)\n   defining Unicode char U+0141 (decimal 321)\n   defining Unicode char U+0142 (decimal 322)\n   defining Unicode char U+0152 (decimal 338)\n   defining Unicode char U+0153 (decimal 339)\n   defining Unicode char U+0174 (decimal 372)\n   defining Unicode char U+0175 (decimal 373)\n   defining Unicode char U+0176 (decimal 374)\n   defining Unicode char U+0177 (decimal 375)\n   defining Unicode char U+0218 (decimal 536)\n   defining Unicode char U+0219 (decimal 537)\n   defining Unicode char U+021A (decimal 538)\n   defining Unicode char U+021B (decimal 539)\n   defining Unicode char U+2013 (decimal 8211)\n   defining Unicode char U+2014 (decimal 8212)\n   defining Unicode char U+2018 (decimal 8216)\n   defining Unicode char U+2019 (decimal 8217)\n   defining Unicode char U+201C (decimal 8220)\n   defining Unicode char U+201D (decimal 8221)\n)\nNow handling font encoding OMS ...\n... 
processing UTF-8 mapping file for font encoding OMS\n\n(/usr/share/texlive/texmf-dist/tex/latex/base/omsenc.dfu\nFile: omsenc.dfu 2017/01/28 v1.1t UTF-8 support for inputenc\n   defining Unicode char U+00A7 (decimal 167)\n   defining Unicode char U+00B6 (decimal 182)\n   defining Unicode char U+00B7 (decimal 183)\n   defining Unicode char U+2020 (decimal 8224)\n   defining Unicode char U+2021 (decimal 8225)\n   defining Unicode char U+2022 (decimal 8226)\n)\nNow handling font encoding OMX ...\n... no UTF-8 mapping file for font encoding OMX\nNow handling font encoding U ...\n... no UTF-8 mapping file for font encoding U\n   defining Unicode char U+00A9 (decimal 169)\n   defining Unicode char U+00AA (decimal 170)\n   defining Unicode char U+00AE (decimal 174)\n   defining Unicode char U+00BA (decimal 186)\n   defining Unicode char U+02C6 (decimal 710)\n   defining Unicode char U+02DC (decimal 732)\n   defining Unicode char U+200C (decimal 8204)\n   defining Unicode char U+2026 (decimal 8230)\n   defining Unicode char U+2122 (decimal 8482)\n   defining Unicode char U+2423 (decimal 9251)\n))\n(/usr/share/texlive/texmf-dist/tex/latex/base/fontenc.sty\nPackage: fontenc 2017/02/22 v2.0g Standard LaTeX package\n\n(/usr/share/texlive/texmf-dist/tex/latex/base/t1enc.def\nFile: t1enc.def 2017/02/22 v2.0g Standard LaTeX file\nLaTeX Font Info:    Redeclaring font encoding T1 on input line 48.\n))\n(/usr/share/texlive/texmf-dist/tex/latex/hyperref/hyperref.sty\nPackage: hyperref 2016/06/24 v6.83q Hypertext links for LaTeX\n\n(/usr/share/texlive/texmf-dist/tex/generic/oberdiek/hobsub-hyperref.sty\nPackage: hobsub-hyperref 2016/05/16 v1.14 Bundle oberdiek, subset hyperref (HO)\n\n\n(/usr/share/texlive/texmf-dist/tex/generic/oberdiek/hobsub-generic.sty\nPackage: hobsub-generic 2016/05/16 v1.14 Bundle oberdiek, subset generic (HO)\nPackage: hobsub 2016/05/16 v1.14 Construct package bundles (HO)\nPackage: infwarerr 2016/05/16 v1.4 Providing info/warning/error messages (HO)\nPackage: 
ltxcmds 2016/05/16 v1.23 LaTeX kernel commands for general use (HO)\nPackage: ifluatex 2016/05/16 v1.4 Provides the ifluatex switch (HO)\nPackage ifluatex Info: LuaTeX not detected.\nPackage hobsub Info: Skipping package `ifvtex' (already loaded).\nPackage: intcalc 2016/05/16 v1.2 Expandable calculations with integers (HO)\nPackage hobsub Info: Skipping package `ifpdf' (already loaded).\nPackage: etexcmds 2016/05/16 v1.6 Avoid name clashes with e-TeX commands (HO)\nPackage etexcmds Info: Could not find \\expanded.\n(etexcmds)             That can mean that you are not using pdfTeX 1.50 or\n(etexcmds)             that some package has redefined \\expanded.\n(etexcmds)             In the latter case, load this package earlier.\nPackage: kvsetkeys 2016/05/16 v1.17 Key value parser (HO)\nPackage: kvdefinekeys 2016/05/16 v1.4 Define keys (HO)\nPackage: pdftexcmds 2016/05/21 v0.22 Utility functions of pdfTeX for LuaTeX (HO\n)\nPackage pdftexcmds Info: LuaTeX not detected.\nPackage pdftexcmds Info: \\pdf@primitive is available.\nPackage pdftexcmds Info: \\pdf@ifprimitive is available.\nPackage pdftexcmds Info: \\pdfdraftmode found.\nPackage: pdfescape 2016/05/16 v1.14 Implements pdfTeX's escape features (HO)\nPackage: bigintcalc 2016/05/16 v1.4 Expandable calculations on big integers (HO\n)\nPackage: bitset 2016/05/16 v1.2 Handle bit-vector datatype (HO)\nPackage: uniquecounter 2016/05/16 v1.3 Provide unlimited unique counter (HO)\n)\nPackage hobsub Info: Skipping package `hobsub' (already loaded).\nPackage: letltxmacro 2016/05/16 v1.5 Let assignment for LaTeX macros (HO)\nPackage: hopatch 2016/05/16 v1.3 Wrapper for package hooks (HO)\nPackage: xcolor-patch 2016/05/16 xcolor patch\nPackage: atveryend 2016/05/16 v1.9 Hooks at the very end of document (HO)\nPackage atveryend Info: \\enddocument detected (standard20110627).\nPackage: atbegshi 2016/06/09 v1.18 At begin shipout hook (HO)\nPackage: refcount 2016/05/16 v3.5 Data extraction from label references (HO)\nPackage: 
hycolor 2016/05/16 v1.8 Color options for hyperref/bookmark (HO)\n)\n(/usr/share/texlive/texmf-dist/tex/latex/oberdiek/auxhook.sty\nPackage: auxhook 2016/05/16 v1.4 Hooks for auxiliary files (HO)\n)\n(/usr/share/texlive/texmf-dist/tex/latex/oberdiek/kvoptions.sty\nPackage: kvoptions 2016/05/16 v3.12 Key value format for package options (HO)\n)\n\\@linkdim=\\dimen111\n\\Hy@linkcounter=\\count91\n\\Hy@pagecounter=\\count92\n\n(/usr/share/texlive/texmf-dist/tex/latex/hyperref/pd1enc.def\nFile: pd1enc.def 2016/06/24 v6.83q Hyperref: PDFDocEncoding definition (HO)\nNow handling font encoding PD1 ...\n... no UTF-8 mapping file for font encoding PD1\n)\n\\Hy@SavedSpaceFactor=\\count93\n\n(/usr/share/texlive/texmf-dist/tex/latex/latexconfig/hyperref.cfg\nFile: hyperref.cfg 2002/06/06 v1.2 hyperref configuration of TeXLive\n)\nPackage hyperref Info: Hyper figures OFF on input line 4486.\nPackage hyperref Info: Link nesting OFF on input line 4491.\nPackage hyperref Info: Hyper index ON on input line 4494.\nPackage hyperref Info: Plain pages OFF on input line 4501.\nPackage hyperref Info: Backreferencing OFF on input line 4506.\nPackage hyperref Info: Implicit mode ON; LaTeX internals redefined.\nPackage hyperref Info: Bookmarks ON on input line 4735.\n\\c@Hy@tempcnt=\\count94\n\n(/usr/share/texlive/texmf-dist/tex/latex/url/url.sty\n\\Urlmuskip=\\muskip10\nPackage: url 2013/09/16  ver 3.4  Verb mode for urls, etc.\n)\nLaTeX Info: Redefining \\url on input line 5088.\n\\XeTeXLinkMargin=\\dimen112\n\\Fld@menulength=\\count95\n\\Field@Width=\\dimen113\n\\Fld@charsize=\\dimen114\nPackage hyperref Info: Hyper figures OFF on input line 6342.\nPackage hyperref Info: Link nesting OFF on input line 6347.\nPackage hyperref Info: Hyper index ON on input line 6350.\nPackage hyperref Info: backreferencing OFF on input line 6357.\nPackage hyperref Info: Link coloring OFF on input line 6362.\nPackage hyperref Info: Link coloring with OCG OFF on input line 6367.\nPackage hyperref Info: PDF/A 
mode OFF on input line 6372.\nLaTeX Info: Redefining \\ref on input line 6412.\nLaTeX Info: Redefining \\pageref on input line 6416.\n\\Hy@abspage=\\count96\n\\c@Item=\\count97\n\\c@Hfootnote=\\count98\n)\n\nPackage hyperref Message: Driver (autodetected): hpdftex.\n\n(/usr/share/texlive/texmf-dist/tex/latex/hyperref/hpdftex.def\nFile: hpdftex.def 2016/06/24 v6.83q Hyperref driver for pdfTeX\n\\Fld@listcount=\\count99\n\\c@bookmark@seq@number=\\count100\n\n(/usr/share/texlive/texmf-dist/tex/latex/oberdiek/rerunfilecheck.sty\nPackage: rerunfilecheck 2016/05/16 v1.8 Rerun checks for auxiliary files (HO)\nPackage uniquecounter Info: New unique counter `rerunfilecheck' on input line 2\n82.\n)\n\\Hy@SectionHShift=\\skip47\n)\n(/usr/share/texlive/texmf-dist/tex/latex/booktabs/booktabs.sty\nPackage: booktabs 2016/04/27 v1.618033 publication quality tables\n\\heavyrulewidth=\\dimen115\n\\lightrulewidth=\\dimen116\n\\cmidrulewidth=\\dimen117\n\\belowrulesep=\\dimen118\n\\belowbottomsep=\\dimen119\n\\aboverulesep=\\dimen120\n\\abovetopsep=\\dimen121\n\\cmidrulesep=\\dimen122\n\\cmidrulekern=\\dimen123\n\\defaultaddspace=\\dimen124\n\\@cmidla=\\count101\n\\@cmidlb=\\count102\n\\@aboverulesep=\\dimen125\n\\@belowrulesep=\\dimen126\n\\@thisruleclass=\\count103\n\\@lastruleclass=\\count104\n\\@thisrulewidth=\\dimen127\n)\n(/usr/share/texlive/texmf-dist/tex/latex/amsfonts/amsfonts.sty\nPackage: amsfonts 2013/01/14 v3.01 Basic AMSFonts support\n\\@emptytoks=\\toks18\n\\symAMSa=\\mathgroup4\n\\symAMSb=\\mathgroup5\nLaTeX Font Info:    Overwriting math alphabet `\\mathfrak' in version `bold'\n(Font)                  U/euf/m/n --> U/euf/b/n on input line 106.\n) (./nicefrac.sty\nPackage: nicefrac 1998/08/04 v0.9b Nice fractions\n\\L@UnitsRaiseDisplaystyle=\\skip48\n\\L@UnitsRaiseTextstyle=\\skip49\n\\L@UnitsRaiseScriptstyle=\\skip50\n(/usr/share/texlive/texmf-dist/tex/latex/base/ifthen.sty\nPackage: ifthen 2014/09/29 v1.1c Standard LaTeX ifthen package 
(DPC)\n))\n(/usr/share/texlive/texmf-dist/tex/latex/microtype/microtype.sty\nPackage: microtype 2016/05/14 v2.6a Micro-typographical refinements (RS)\n\\MT@toks=\\toks19\n\\MT@count=\\count105\nLaTeX Info: Redefining \\textls on input line 774.\n\\MT@outer@kern=\\dimen128\nLaTeX Info: Redefining \\textmicrotypecontext on input line 1310.\n\\MT@listname@count=\\count106\n\n(/usr/share/texlive/texmf-dist/tex/latex/microtype/microtype-pdftex.def\nFile: microtype-pdftex.def 2016/05/14 v2.6a Definitions specific to pdftex (RS)\n\nLaTeX Info: Redefining \\lsstyle on input line 916.\nLaTeX Info: Redefining \\lslig on input line 916.\n\\MT@outer@space=\\skip51\n)\nPackage microtype Info: Loading configuration file microtype.cfg.\n\n(/usr/share/texlive/texmf-dist/tex/latex/microtype/microtype.cfg\nFile: microtype.cfg 2016/05/14 v2.6a microtype main configuration file (RS)\n))\n(/usr/share/texlive/texmf-dist/tex/latex/graphics/graphicx.sty\nPackage: graphicx 2014/10/28 v1.0g Enhanced LaTeX Graphics (DPC,SPQR)\n\n(/usr/share/texlive/texmf-dist/tex/latex/graphics/graphics.sty\nPackage: graphics 2016/10/09 v1.0u Standard LaTeX Graphics (DPC,SPQR)\n\n(/usr/share/texlive/texmf-dist/tex/latex/graphics/trig.sty\nPackage: trig 2016/01/03 v1.10 sin cos tan (DPC)\n)\n(/usr/share/texlive/texmf-dist/tex/latex/graphics-cfg/graphics.cfg\nFile: graphics.cfg 2016/06/04 v1.11 sample graphics configuration\n)\nPackage graphics Info: Driver file: pdftex.def on input line 99.\n\n(/usr/share/texlive/texmf-dist/tex/latex/graphics-def/pdftex.def\nFile: pdftex.def 2017/01/12 v0.06k Graphics/color for pdfTeX\n\\Gread@gobject=\\count107\n))\n\\Gin@req@height=\\dimen129\n\\Gin@req@width=\\dimen130\n)\n(./nips_2017.aux)\n\\openout1 = `nips_2017.aux'.\n\nLaTeX Font Info:    Checking defaults for OML/cmm/m/it on input line 30.\nLaTeX Font Info:    ... okay on input line 30.\nLaTeX Font Info:    Checking defaults for T1/cmr/m/n on input line 30.\nLaTeX Font Info:    ... 
okay on input line 30.\nLaTeX Font Info:    Checking defaults for OT1/cmr/m/n on input line 30.\nLaTeX Font Info:    ... okay on input line 30.\nLaTeX Font Info:    Checking defaults for OMS/cmsy/m/n on input line 30.\nLaTeX Font Info:    ... okay on input line 30.\nLaTeX Font Info:    Checking defaults for OMX/cmex/m/n on input line 30.\nLaTeX Font Info:    ... okay on input line 30.\nLaTeX Font Info:    Checking defaults for U/cmr/m/n on input line 30.\nLaTeX Font Info:    ... okay on input line 30.\nLaTeX Font Info:    Checking defaults for PD1/pdf/m/n on input line 30.\nLaTeX Font Info:    ... okay on input line 30.\nLaTeX Font Info:    Try loading font information for T1+ptm on input line 30.\n (/usr/share/texlive/texmf-dist/tex/latex/psnfss/t1ptm.fd\nFile: t1ptm.fd 2001/06/04 font definitions for T1/ptm.\n)\n*geometry* driver: auto-detecting\n*geometry* detected driver: pdftex\n*geometry* verbose mode - [ preamble ] result:\n* driver: pdftex\n* paper: letterpaper\n* layout: <same size as paper>\n* layoutoffset:(h,v)=(0.0pt,0.0pt)\n* modes: \n* h-part:(L,W,R)=(92.14519pt, 430.00462pt, 92.14519pt)\n* v-part:(T,H,B)=(95.39737pt, 556.47656pt, 143.09605pt)\n* \\paperwidth=614.295pt\n* \\paperheight=794.96999pt\n* \\textwidth=430.00462pt\n* \\textheight=556.47656pt\n* \\oddsidemargin=19.8752pt\n* \\evensidemargin=19.8752pt\n* \\topmargin=-13.87262pt\n* \\headheight=12.0pt\n* \\headsep=25.0pt\n* \\topskip=10.0pt\n* \\footskip=30.0pt\n* \\marginparwidth=65.0pt\n* \\marginparsep=11.0pt\n* \\columnsep=10.0pt\n* \\skip\\footins=9.0pt plus 4.0pt minus 2.0pt\n* \\hoffset=0.0pt\n* \\voffset=0.0pt\n* \\mag=1000\n* \\@twocolumnfalse\n* \\@twosidefalse\n* \\@mparswitchfalse\n* \\@reversemarginfalse\n* (1in=72.27pt=25.4mm, 1cm=28.453pt)\n\n*geometry* verbose mode - [ newgeometry ] result:\n* driver: pdftex\n* paper: letterpaper\n* layout: <same size as paper>\n* layoutoffset:(h,v)=(0.0pt,0.0pt)\n* modes: \n* h-part:(L,W,R)=(108.405pt, 397.48499pt, 108.40501pt)\n* 
v-part:(T,H,B)=(72.26999pt, 650.43pt, 72.27pt)\n* \\paperwidth=614.295pt\n* \\paperheight=794.96999pt\n* \\textwidth=397.48499pt\n* \\textheight=650.43pt\n* \\oddsidemargin=36.13501pt\n* \\evensidemargin=36.13501pt\n* \\topmargin=-37.0pt\n* \\headheight=12.0pt\n* \\headsep=25.0pt\n* \\topskip=10.0pt\n* \\footskip=30.0pt\n* \\marginparwidth=65.0pt\n* \\marginparsep=11.0pt\n* \\columnsep=10.0pt\n* \\skip\\footins=9.0pt plus 4.0pt minus 2.0pt\n* \\hoffset=0.0pt\n* \\voffset=0.0pt\n* \\mag=1000\n* \\@twocolumnfalse\n* \\@twosidefalse\n* \\@mparswitchfalse\n* \\@reversemarginfalse\n* (1in=72.27pt=25.4mm, 1cm=28.453pt)\n\n\\AtBeginShipoutBox=\\box26\nPackage hyperref Info: Link coloring OFF on input line 30.\n(/usr/share/texlive/texmf-dist/tex/latex/hyperref/nameref.sty\nPackage: nameref 2016/05/21 v2.44 Cross-referencing by name of section\n\n(/usr/share/texlive/texmf-dist/tex/generic/oberdiek/gettitlestring.sty\nPackage: gettitlestring 2016/05/16 v1.5 Cleanup title references (HO)\n)\n\\c@section@level=\\count108\n)\nLaTeX Info: Redefining \\ref on input line 30.\nLaTeX Info: Redefining \\pageref on input line 30.\nLaTeX Info: Redefining \\nameref on input line 30.\n\n(./nips_2017.out) (./nips_2017.out)\n\\@outlinefile=\\write3\n\\openout3 = `nips_2017.out'.\n\nLaTeX Info: Redefining \\microtypecontext on input line 30.\nPackage microtype Info: Generating PDF output.\nPackage microtype Info: Character protrusion enabled (level 2).\nPackage microtype Info: Using default protrusion set `alltext'.\nPackage microtype Info: Automatic font expansion enabled (level 2),\n(microtype)             stretch: 20, shrink: 20, step: 1, non-selected.\nPackage microtype Info: Using default expansion set `basictext'.\nPackage microtype Info: No adjustment of tracking.\nPackage microtype Info: No adjustment of interword spacing.\nPackage microtype Info: No adjustment of character kerning.\n\n(/usr/share/texlive/texmf-dist/tex/latex/microtype/mt-ptm.cfg\nFile: mt-ptm.cfg 2006/04/20 v1.7 
microtype config. file: Times (RS)\n)\n(/usr/share/texlive/texmf-dist/tex/context/base/mkii/supp-pdf.mkii\n[Loading MPS to PDF converter (version 2006.09.02).]\n\\scratchcounter=\\count109\n\\scratchdimen=\\dimen131\n\\scratchbox=\\box27\n\\nofMPsegments=\\count110\n\\nofMParguments=\\count111\n\\everyMPshowfont=\\toks20\n\\MPscratchCnt=\\count112\n\\MPscratchDim=\\dimen132\n\\MPnumerator=\\count113\n\\makeMPintoPDFobject=\\count114\n\\everyMPtoPDFconversion=\\toks21\n) (/usr/share/texlive/texmf-dist/tex/latex/oberdiek/epstopdf-base.sty\nPackage: epstopdf-base 2016/05/15 v2.6 Base part for package epstopdf\n\n(/usr/share/texlive/texmf-dist/tex/latex/oberdiek/grfext.sty\nPackage: grfext 2016/05/16 v1.2 Manage graphics extensions (HO)\n)\nPackage epstopdf-base Info: Redefining graphics rule for `.eps' on input line 4\n38.\nPackage grfext Info: Graphics extension search list:\n(grfext)             [.png,.pdf,.jpg,.mps,.jpeg,.jbig2,.jb2,.PNG,.PDF,.JPG,.JPE\nG,.JBIG2,.JB2,.eps]\n(grfext)             \\AppendGraphicsExtensions on input line 456.\n\n(/usr/share/texlive/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg\nFile: epstopdf-sys.cfg 2010/07/13 v1.3 Configuration of (r)epstopdf for TeX Liv\ne\n))\nLaTeX Font Info:    Font shape `T1/ptm/bx/n' in size <17.28> not available\n(Font)              Font shape `T1/ptm/b/n' tried instead on input line 33.\n\n(/usr/share/texlive/texmf-dist/tex/latex/microtype/mt-cmr.cfg\nFile: mt-cmr.cfg 2013/05/19 v2.2 microtype config. file: Computer Modern Roman \n(RS)\n)\nLaTeX Font Info:    Try loading font information for U+msa on input line 33.\n\n(/usr/share/texlive/texmf-dist/tex/latex/amsfonts/umsa.fd\nFile: umsa.fd 2013/01/14 v3.01 AMS symbols A\n)\n(/usr/share/texlive/texmf-dist/tex/latex/microtype/mt-msa.cfg\nFile: mt-msa.cfg 2006/02/04 v1.1 microtype config. 
file: AMS symbols (a) (RS)\n)\nLaTeX Font Info:    Try loading font information for U+msb on input line 33.\n\n(/usr/share/texlive/texmf-dist/tex/latex/amsfonts/umsb.fd\nFile: umsb.fd 2013/01/14 v3.01 AMS symbols B\n)\n(/usr/share/texlive/texmf-dist/tex/latex/microtype/mt-msb.cfg\nFile: mt-msb.cfg 2005/06/01 v1.0 microtype config. file: AMS symbols (b) (RS)\n)\nLaTeX Font Info:    Font shape `T1/ptm/bx/n' in size <10> not available\n(Font)              Font shape `T1/ptm/b/n' tried instead on input line 33.\nLaTeX Font Info:    Font shape `T1/ptm/bx/n' in size <12> not available\n(Font)              Font shape `T1/ptm/b/n' tried instead on input line 34.\n\n<images/blur.png, id=21, 530.98375pt x 193.72375pt>\nFile: images/blur.png Graphic file (type png)\n <use images/blur.png>\nPackage pdftex.def Info: images/blur.png used on input line 71.\n(pdftex.def)             Requested size: 397.48499pt x 145.0205pt.\n\n<images/patches.png, id=23, 465.74pt x 290.08376pt>\nFile: images/patches.png Graphic file (type png)\n <use images/patches.png>\nPackage pdftex.def Info: images/patches.png used on input line 73.\n(pdftex.def)             Requested size: 397.48499pt x 247.58154pt.\n\n<images/matrix.png, id=24, 562.1pt x 274.02374pt>\nFile: images/matrix.png Graphic file (type png)\n <use images/matrix.png>\nPackage pdftex.def Info: images/matrix.png used on input line 75.\n(pdftex.def)             Requested size: 397.48499pt x 193.77214pt.\n\nUnderfull \\hbox (badness 10000) in paragraph at lines 71--77\n\n []\n\n\nUnderfull \\vbox (badness 10000) has occurred while \\output is active []\n\n [1\n\n\n{/var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map}]\nUnderfull \\vbox (badness 10000) has occurred while \\output is active []\n\n [2 <./images/blur.png> <./images/patches.png> <./images/matrix.png>]\nLaTeX Font Info:    Font shape `T1/ptm/bx/it' in size <10> not available\n(Font)              Font shape `T1/ptm/b/it' tried instead on input line 105.\n\n<images/accuracy.jpeg, 
id=49, 642.4pt x 115.43124pt>\nFile: images/accuracy.jpeg Graphic file (type jpg)\n\n<use images/accuracy.jpeg>\nPackage pdftex.def Info: images/accuracy.jpeg used on input line 128.\n(pdftex.def)             Requested size: 397.48499pt x 71.42395pt.\n\nUnderfull \\hbox (badness 10000) in paragraph at lines 128--129\n\n []\n\n\nUnderfull \\vbox (badness 1137) has occurred while \\output is active []\n\n [3 <./images/accuracy.jpeg>]\nPackage atveryend Info: Empty hook `BeforeClearDocument' on input line 152.\n [4]\nPackage atveryend Info: Empty hook `AfterLastShipout' on input line 152.\n (./nips_2017.aux)\nPackage atveryend Info: Executing hook `AtVeryEndDocument' on input line 152.\nPackage atveryend Info: Executing hook `AtEndAfterFileList' on input line 152.\nPackage rerunfilecheck Info: File `nips_2017.out' has not changed.\n(rerunfilecheck)             Checksum: 1DC773C805F4BC12F6E4365B1AA15BC9;253.\nPackage atveryend Info: Empty hook `AtVeryVeryEnd' on input line 152.\n ) \nHere is how much of TeX's memory you used:\n 8271 strings out of 494945\n 123632 string characters out of 6181033\n 239876 words of memory out of 5000000\n 11331 multiletter control sequences out of 15000+600000\n 34120 words of font info for 106 fonts, out of 8000000 for 9000\n 14 hyphenation exceptions out of 8191\n 31i,8n,38p,213b,327s stack positions out of 5000i,500n,10000p,200000b,80000s\n{/usr/share/texlive/texmf-dist/fonts/enc/dvips/base/8r.enc}</usr/share/texliv\ne/texmf-dist/fonts/type1/public/amsfonts/cm/cmmi10.pfb></usr/share/texlive/texm\nf-dist/fonts/type1/public/amsfonts/cm/cmr10.pfb></usr/share/texlive/texmf-dist/\nfonts/type1/public/amsfonts/cm/cmsy10.pfb></usr/share/texlive/texmf-dist/fonts/\ntype1/urw/times/utmb8a.pfb></usr/share/texlive/texmf-dist/fonts/type1/urw/times\n/utmbi8a.pfb></usr/share/texlive/texmf-dist/fonts/type1/urw/times/utmr8a.pfb></\nusr/share/texlive/texmf-dist/fonts/type1/urw/times/utmri8a.pfb>\nOutput written on nips_2017.pdf (4 pages, 2502387 
bytes).\nPDF statistics:\n 96 PDF objects out of 1000 (max. 8388607)\n 75 compressed objects within 1 object stream\n 20 named destinations out of 1000 (max. 500000)\n 18485 words of extra memory for PDF output out of 20736 (max. 10000000)\n\n"
  },
  {
    "path": "Report/nips_2017.out",
    "content": "\\BOOKMARK [1][-]{section.1}{Our approach}{}% 1\n\\BOOKMARK [2][-]{subsection.1.1}{Generating the training data}{section.1}% 2\n\\BOOKMARK [1][-]{section.2}{Learning the Convolutional Neural Network \\(CNN\\)}{}% 3\n\\BOOKMARK [1][-]{section.3}{Conclusion}{}% 4\n"
  },
  {
    "path": "Report/nips_2017.sty",
    "content": "% partial rewrite of the LaTeX2e package for submissions to the\n% Conference on Neural Information Processing Systems (NIPS):\n%\n% - uses more LaTeX conventions\n% - line numbers at submission time replaced with aligned numbers from\n%   lineno package\n% - \\nipsfinalcopy replaced with [final] package option\n% - automatically loads times package for authors\n% - loads natbib automatically; this can be suppressed with the\n%   [nonatbib] package option\n% - adds foot line to first page identifying the conference\n%\n% Roman Garnett (garnett@wustl.edu) and the many authors of\n% nips15submit_e.sty, including MK and drstrip@sandia\n%\n% last revision: March 2017\n\n\\NeedsTeXFormat{LaTeX2e}\n\\ProvidesPackage{nips_2017}[2017/03/20 NIPS 2017 submission/camera-ready style file]\n\n% declare final option, which creates camera-ready copy\n\\newif\\if@nipsfinal\\@nipsfinalfalse\n\\DeclareOption{final}{\n  \\@nipsfinaltrue\n}\n\n% declare nonatbib option, which does not load natbib in case of\n% package clash (users can pass options to natbib via\n% \\PassOptionsToPackage)\n\\newif\\if@natbib\\@natbibtrue\n\\DeclareOption{nonatbib}{\n  \\@natbibfalse\n}\n\n\\ProcessOptions\\relax\n\n% fonts\n\\renewcommand{\\rmdefault}{ptm}\n\\renewcommand{\\sfdefault}{phv}\n\n% change this every year for notice string at bottom\n\\newcommand{\\@nipsordinal}{31st}\n\\newcommand{\\@nipsyear}{2017}\n\\newcommand{\\@nipslocation}{Long Beach, CA, USA}\n\n% handle tweaks for camera-ready copy vs. submission copy\n\\if@nipsfinal\n  \\newcommand{\\@noticestring}{%\n    \\@nipsordinal\\/ Conference on Neural Information Processing Systems\n    (NIPS \\@nipsyear), \\@nipslocation.%\n  }\n\\else\n  \\newcommand{\\@noticestring}{%\n    Submitted to \\@nipsordinal\\/ Conference on Neural Information\n    Processing Systems (NIPS \\@nipsyear). 
Do not distribute.%\n  }\n\n  % line numbers for submission\n  \\RequirePackage{lineno}\n  \\linenumbers\n\n  % fix incompatibilities between lineno and amsmath, if required, by\n  % transparently wrapping linenomath environments around amsmath\n  % environments\n  \\AtBeginDocument{%\n    \\@ifpackageloaded{amsmath}{%\n      \\newcommand*\\patchAmsMathEnvironmentForLineno[1]{%\n        \\expandafter\\let\\csname old#1\\expandafter\\endcsname\\csname #1\\endcsname\n        \\expandafter\\let\\csname oldend#1\\expandafter\\endcsname\\csname end#1\\endcsname\n        \\renewenvironment{#1}%\n          {\\linenomath\\csname old#1\\endcsname}%\n          {\\csname oldend#1\\endcsname\\endlinenomath}%\n      }%\n      \\newcommand*\\patchBothAmsMathEnvironmentsForLineno[1]{%\n        \\patchAmsMathEnvironmentForLineno{#1}%\n        \\patchAmsMathEnvironmentForLineno{#1*}%\n      }%\n      \\patchBothAmsMathEnvironmentsForLineno{equation}%\n      \\patchBothAmsMathEnvironmentsForLineno{align}%\n      \\patchBothAmsMathEnvironmentsForLineno{flalign}%\n      \\patchBothAmsMathEnvironmentsForLineno{alignat}%\n      \\patchBothAmsMathEnvironmentsForLineno{gather}%\n      \\patchBothAmsMathEnvironmentsForLineno{multline}%\n    }{}\n  }\n\\fi\n\n% load natbib unless told otherwise\n\\if@natbib\n  \\RequirePackage{natbib}\n\\fi\n\n% set page geometry\n\\usepackage[verbose=true,letterpaper]{geometry}\n\\AtBeginDocument{\n  \\newgeometry{\n    textheight=9in,\n    textwidth=5.5in,\n    top=1in,\n    headheight=12pt,\n    headsep=25pt,\n    footskip=30pt\n  }\n  \\@ifpackageloaded{fullpage}\n    {\\PackageWarning{nips_2016}{fullpage package not allowed! 
Overwriting formatting.}}\n    {}\n}\n\n\\widowpenalty=10000\n\\clubpenalty=10000\n\\flushbottom\n\\sloppy\n\n% font sizes with reduced leading\n\\renewcommand{\\normalsize}{%\n  \\@setfontsize\\normalsize\\@xpt\\@xipt\n  \\abovedisplayskip      7\\p@ \\@plus 2\\p@ \\@minus 5\\p@\n  \\abovedisplayshortskip \\z@ \\@plus 3\\p@\n  \\belowdisplayskip      \\abovedisplayskip\n  \\belowdisplayshortskip 4\\p@ \\@plus 3\\p@ \\@minus 3\\p@\n}\n\\normalsize\n\\renewcommand{\\small}{%\n  \\@setfontsize\\small\\@ixpt\\@xpt\n  \\abovedisplayskip      6\\p@ \\@plus 1.5\\p@ \\@minus 4\\p@\n  \\abovedisplayshortskip \\z@  \\@plus 2\\p@\n  \\belowdisplayskip      \\abovedisplayskip\n  \\belowdisplayshortskip 3\\p@ \\@plus 2\\p@   \\@minus 2\\p@\n}\n\\renewcommand{\\footnotesize}{\\@setfontsize\\footnotesize\\@ixpt\\@xpt}\n\\renewcommand{\\scriptsize}{\\@setfontsize\\scriptsize\\@viipt\\@viiipt}\n\\renewcommand{\\tiny}{\\@setfontsize\\tiny\\@vipt\\@viipt}\n\\renewcommand{\\large}{\\@setfontsize\\large\\@xiipt{14}}\n\\renewcommand{\\Large}{\\@setfontsize\\Large\\@xivpt{16}}\n\\renewcommand{\\LARGE}{\\@setfontsize\\LARGE\\@xviipt{20}}\n\\renewcommand{\\huge}{\\@setfontsize\\huge\\@xxpt{23}}\n\\renewcommand{\\Huge}{\\@setfontsize\\Huge\\@xxvpt{28}}\n\n% sections with less space\n\\providecommand{\\section}{}\n\\renewcommand{\\section}{%\n  \\@startsection{section}{1}{\\z@}%\n                {-2.0ex \\@plus -0.5ex \\@minus -0.2ex}%\n                { 1.5ex \\@plus  0.3ex \\@minus  0.2ex}%\n                {\\large\\bf\\raggedright}%\n}\n\\providecommand{\\subsection}{}\n\\renewcommand{\\subsection}{%\n  \\@startsection{subsection}{2}{\\z@}%\n                {-1.8ex \\@plus -0.5ex \\@minus -0.2ex}%\n                { 0.8ex \\@plus  0.2ex}%\n                {\\normalsize\\bf\\raggedright}%\n}\n\\providecommand{\\subsubsection}{}\n\\renewcommand{\\subsubsection}{%\n  \\@startsection{subsubsection}{3}{\\z@}%\n                {-1.5ex \\@plus -0.5ex \\@minus -0.2ex}%\n                { 0.5ex 
\\@plus  0.2ex}%\n                {\\normalsize\\bf\\raggedright}%\n}\n\\providecommand{\\paragraph}{}\n\\renewcommand{\\paragraph}{%\n  \\@startsection{paragraph}{4}{\\z@}%\n                {1.5ex \\@plus 0.5ex \\@minus 0.2ex}%\n                {-1em}%\n                {\\normalsize\\bf}%\n}\n\\providecommand{\\subparagraph}{}\n\\renewcommand{\\subparagraph}{%\n  \\@startsection{subparagraph}{5}{\\z@}%\n                {1.5ex \\@plus 0.5ex \\@minus 0.2ex}%\n                {-1em}%\n                {\\normalsize\\bf}%\n}\n\\providecommand{\\subsubsubsection}{}\n\\renewcommand{\\subsubsubsection}{%\n  \\vskip5pt{\\noindent\\normalsize\\rm\\raggedright}%\n}\n\n% float placement\n\\renewcommand{\\topfraction      }{0.85}\n\\renewcommand{\\bottomfraction   }{0.4}\n\\renewcommand{\\textfraction     }{0.1}\n\\renewcommand{\\floatpagefraction}{0.7}\n\n\\newlength{\\@nipsabovecaptionskip}\\setlength{\\@nipsabovecaptionskip}{7\\p@}\n\\newlength{\\@nipsbelowcaptionskip}\\setlength{\\@nipsbelowcaptionskip}{\\z@}\n\n\\setlength{\\abovecaptionskip}{\\@nipsabovecaptionskip}\n\\setlength{\\belowcaptionskip}{\\@nipsbelowcaptionskip}\n\n% swap above/belowcaptionskip lengths for tables\n\\renewenvironment{table}\n  {\\setlength{\\abovecaptionskip}{\\@nipsbelowcaptionskip}%\n   \\setlength{\\belowcaptionskip}{\\@nipsabovecaptionskip}%\n   \\@float{table}}\n  {\\end@float}\n\n% footnote formatting\n\\setlength{\\footnotesep }{6.65\\p@}\n\\setlength{\\skip\\footins}{9\\p@ \\@plus 4\\p@ \\@minus 2\\p@}\n\\renewcommand{\\footnoterule}{\\kern-3\\p@ \\hrule width 12pc \\kern 2.6\\p@}\n\\setcounter{footnote}{0}\n\n% paragraph formatting\n\\setlength{\\parindent}{\\z@}\n\\setlength{\\parskip  }{5.5\\p@}\n\n% list formatting\n\\setlength{\\topsep       }{4\\p@ \\@plus 1\\p@   \\@minus 2\\p@}\n\\setlength{\\partopsep    }{1\\p@ \\@plus 0.5\\p@ \\@minus 0.5\\p@}\n\\setlength{\\itemsep      }{2\\p@ \\@plus 1\\p@   \\@minus 0.5\\p@}\n\\setlength{\\parsep       }{2\\p@ \\@plus 1\\p@   \\@minus 
0.5\\p@}\n\\setlength{\\leftmargin   }{3pc}\n\\setlength{\\leftmargini  }{\\leftmargin}\n\\setlength{\\leftmarginii }{2em}\n\\setlength{\\leftmarginiii}{1.5em}\n\\setlength{\\leftmarginiv }{1.0em}\n\\setlength{\\leftmarginv  }{0.5em}\n\\def\\@listi  {\\leftmargin\\leftmargini}\n\\def\\@listii {\\leftmargin\\leftmarginii\n              \\labelwidth\\leftmarginii\n              \\advance\\labelwidth-\\labelsep\n              \\topsep  2\\p@ \\@plus 1\\p@    \\@minus 0.5\\p@\n              \\parsep  1\\p@ \\@plus 0.5\\p@ \\@minus 0.5\\p@\n              \\itemsep \\parsep}\n\\def\\@listiii{\\leftmargin\\leftmarginiii\n              \\labelwidth\\leftmarginiii\n              \\advance\\labelwidth-\\labelsep\n              \\topsep    1\\p@ \\@plus 0.5\\p@ \\@minus 0.5\\p@\n              \\parsep    \\z@\n              \\partopsep 0.5\\p@ \\@plus 0\\p@ \\@minus 0.5\\p@\n              \\itemsep \\topsep}\n\\def\\@listiv {\\leftmargin\\leftmarginiv\n              \\labelwidth\\leftmarginiv\n              \\advance\\labelwidth-\\labelsep}\n\\def\\@listv  {\\leftmargin\\leftmarginv\n              \\labelwidth\\leftmarginv\n              \\advance\\labelwidth-\\labelsep}\n\\def\\@listvi {\\leftmargin\\leftmarginvi\n              \\labelwidth\\leftmarginvi\n              \\advance\\labelwidth-\\labelsep}\n\n% create title\n\\providecommand{\\maketitle}{}\n\\renewcommand{\\maketitle}{%\n  \\par\n  \\begingroup\n    \\renewcommand{\\thefootnote}{\\fnsymbol{footnote}}\n    % for perfect author name centering\n    \\renewcommand{\\@makefnmark}{\\hbox to \\z@{$^{\\@thefnmark}$\\hss}}\n    % The footnote-mark was overlapping the footnote-text,\n    % added the following to fix this problem               (MK)\n    \\long\\def\\@makefntext##1{%\n      \\parindent 1em\\noindent\n      \\hbox to 1.8em{\\hss $\\m@th ^{\\@thefnmark}$}##1\n    }\n    \\thispagestyle{empty}\n    \\@maketitle\n    \\@thanks\n    \\@notice\n  \\endgroup\n  \\let\\maketitle\\relax\n  
\\let\\thanks\\relax\n}\n\n% rules for title box at top of first page\n\\newcommand{\\@toptitlebar}{\n  \\hrule height 4\\p@\n  \\vskip 0.25in\n  \\vskip -\\parskip%\n}\n\\newcommand{\\@bottomtitlebar}{\n  \\vskip 0.29in\n  \\vskip -\\parskip\n  \\hrule height 1\\p@\n  \\vskip 0.09in%\n}\n\n% create title (includes both anonymized and non-anonymized versions)\n\\providecommand{\\@maketitle}{}\n\\renewcommand{\\@maketitle}{%\n  \\vbox{%\n    \\hsize\\textwidth\n    \\linewidth\\hsize\n    \\vskip 0.1in\n    \\@toptitlebar\n    \\centering\n    {\\LARGE\\bf \\@title\\par}\n    \\@bottomtitlebar\n    \\if@nipsfinal\n      \\def\\And{%\n        \\end{tabular}\\hfil\\linebreak[0]\\hfil%\n        \\begin{tabular}[t]{c}\\bf\\rule{\\z@}{24\\p@}\\ignorespaces%\n      }\n      \\def\\AND{%\n        \\end{tabular}\\hfil\\linebreak[4]\\hfil%\n        \\begin{tabular}[t]{c}\\bf\\rule{\\z@}{24\\p@}\\ignorespaces%\n      }\n      \\begin{tabular}[t]{c}\\bf\\rule{\\z@}{24\\p@}\\@author\\end{tabular}%\n    \\else\n      \\begin{tabular}[t]{c}\\bf\\rule{\\z@}{24\\p@}\n        Anonymous Author(s) \\\\\n        Affiliation \\\\\n        Address \\\\\n        \\texttt{email} \\\\\n      \\end{tabular}%\n    \\fi\n    \\vskip 0.3in \\@minus 0.1in\n  }\n}\n\n% add conference notice to bottom of first page\n\\newcommand{\\ftype@noticebox}{8}\n\\newcommand{\\@notice}{%\n  % give a bit of extra room back to authors on first page\n  \\enlargethispage{2\\baselineskip}%\n  \\@float{noticebox}[b]%\n    \\footnotesize\\@noticestring%\n  \\end@float%\n}\n\n% abstract styling\n\\renewenvironment{abstract}%\n{%\n  \\vskip 0.075in%\n  \\centerline%\n  {\\large\\bf Abstract}%\n  \\vspace{0.5ex}%\n  \\begin{quote}%\n}\n{\n  \\par%\n  \\end{quote}%\n  \\vskip 1ex%\n}\n\n\\endinput\n"
  },
  {
    "path": "Report/nips_2017.tex",
    "content": "\\documentclass{article}\n\n\\usepackage[final]{nips_2017}\n\n% to compile a camera-ready version, add the [final] option, e.g.:\n% \\usepackage[final]{nips_2017}\n\n\\usepackage[utf8]{inputenc} % allow utf-8 input\n\\usepackage[T1]{fontenc}    % use 8-bit T1 fonts\n\\usepackage{hyperref}       % hyperlinks\n\\usepackage{url}            % simple URL typesetting\n\\usepackage{booktabs}       % professional-quality tables\n\\usepackage{amsfonts}       % blackboard math symbols\n\\usepackage{nicefrac}       % compact symbols for 1/2, etc.\n\\usepackage{microtype}      % microtypography\n\\usepackage{graphicx}\t\t% to allow images\n\n\\graphicspath{ {images/} }\n\n\\title{Detecting Motion Blur in Images}\n\n\\author{\n  Karan Varindani \\\\\n  \\And\n  Wenyang Zhang \\\\\n  \\And\n  Sibo Zhu \\\\\n}\n\n\\begin{document}\n\n\\maketitle\n\n\\begin{abstract}\n  Our project aims to estimate motion blur from a single, blurry image. \n  We propose a deep learning approach to predict the probabilistic distribution \n  of motion blur at the patch level using a Convolutional Neural Network (CNN).\n\\end{abstract}\n\n\\section{Our approach}\nWe approached the problem by slicing 100 images into 30x30 patches, and applied\nour own motion blur algorithm to them (with a random rate of 50\\%). We then labeled \nthe blurry and non-blurry patches with 0s and 1s (0 for still, 1 for blurry), and\nloaded the modified images in as our training data.\n  \n\\subsection{Generating the training data}\nWe generated the training data using images from the \n\\textit{\\href{http://host.robots.ox.ac.uk/pascal/VOC/voc2010/}{Pascal Visual Object \nClasses Challenge 2010 (VOC2010)}} data set. Our work was done in Python using the \n\\textit{PIL}, \\textit{numpy}, \\textit{opency}, and \\textit{os} libraries.\n\nOnce we had the original images from \\textit{Pascal}, we had to modify them to fit our \nneeds. 
We needed to have 100 images, each partially blurred and with a corresponding \nmatrix indicating which part of the image is blurred. \n\nWe achieved this by:\n\\begin{enumerate}\n  \\item Making a blurred copy of the original image. \n  \\item Cutting both images (original and blurry) into $30\\times30$ patches.\n  \\item Creating a 2D List in Python of size $30\\times30$, to represent each image patch.\n  We initialize each element to 0 (to represent non-blurry). \n  \\item Picking half the patches from the list and marking them as 1 (to represent \n  blurry).\n  \\item Putting the final image together to get a partially-blurred, qualifying image \n  (and its corresponding matrix).\n  \\item Saving the image as \"n.jpg\" (where n is the serial number of the image), and \n  adding the matrix to a list (to form a 3D 'list of lists') containing the matrices \n  of all the images.\n\\end{enumerate}\n\n\\includegraphics[width=\\textwidth]{blur} \\\\\n\\textit{Image of the original-to-blur process.} \\\\ \n\\includegraphics[width=\\textwidth]{patches} \\\\\n\\textit{Image of the image splitting process.} \\\\\n\\includegraphics[width=\\textwidth]{matrix} \\\\\n\\textit{The final image with its corresponding matrix.} \\\\\n\nWe repeat the above for all 100 images, until we end up with a folder containing \npartially-blurred images \\textit{\"0.jpg\"} through \\textit{\"100.jpg\"}, and a 3D list \n(named \\textit{\"labels\"}) that contains 100 matrices. This lets us access the matrix for \nimage \\textit{\"31.jpg\"}, for example, by querying for \\textit{\"labels[31]\"}.\n\n\\section{Learning the Convolutional Neural Network (CNN)}\nOnce we had the prepared images, \\textbf{we loaded them into our training set.} We ran \ninto a problem loading the images into a \\textit{numpy} array, where our images were of \nthe form (30,30,3), while the \\textit{Keras.Conv2D} layer required input to be of the form \n(3,30,30). 
We solved this by using the \\textit{numpy.swapaxes()} function to alter the \nimages' shape in order to fit the convolutional layer.\n\n\\paragraph{We then apply the CNN learning model.}\nFirst, we apply a Convolution2D layer with $7\\times7$ filters, followed by a \\textit{ReLU} \nfunction. The \\textit{Conv} layer's parameters consist of a set of learnable filters. \nEach filter is small spatially, but extends through the depth of the input volume. \n\n\\paragraph{During the forward pass,} \nwe slide each filter across the width and height of the input \nvolume and compute the dot products between the entries of the filter and the input at \nany position. \\textit{ReLU} is the rectifier function- an activation function that can be \nused by neurons, just like any other activation function. A node using the rectifier \nactivation function is called a \\textit{ReLU node}. \\textit{ReLU} sets all negative values \nin the matrix x to 0, and all other values are kept constant. ReLU is computed after the \nconvolution, and is thus a nonlinear activation function (like \\textit{tanh} or \n\\textit{sigmoid}).\n\nAfter that, \\textbf{we add a \\textit{MaxPooling2D} layer} with a pool size of $2\\times2$. \nMaxPooling is a sample-based discretization process. The objective is to down-sample an \ninput representation, reducing its dimensionality and allowing for assumptions to be made \nabout features contained in the binned sub-regions. \n\nWe then \\textbf{add a \\textit{Dropout} layer} with dropout rate of 0.2, which makes our \nlearning process faster. Dropout randomly ignores nodes, which is useful in CNN models because \nit prevents interdependencies from emerging between nodes. This allows the network to learn \nmore and form a more robust relationship. We then do the \\textit{'Conv2D, ReLU, \nMaxPooling2D, Dropout'} cycle again. Finally, we add a fully-connected layer with \n\\textit{ReLU}, and then \\textit{softmax} the result. 
\\textit{Softmax} is a classifier at \nthe end of the neural network — a logistic regression to regularize outputs to a value \nbetween 0 and 1. \n\n\\paragraph{We set our model's learning rate to be $0.01$.} This might generally be too \nbig, but we made this decision for the sake of brevity - it was the fastest way to show \na result. We chose a batch size of 128 (because we had large training data). We also \nchose \\textit{Adam} as our optimizer as it's the most efficient optimizer for our model.\n\nAfter training with 100 epochs, \\textbf{we had testing accuracy of 92\\%}, which is a \nvery optimal rate for our model. Our training model is saved in an HDF5 file, \n\\textit{\"motionblur.h5\"}.\n\n\\includegraphics[width=\\textwidth]{accuracy} \\\\\n\n\\section{Conclusion}\nIn this report, we have proposed a novel CNN-based motion blur detection approach. We \nlearn an effective CNN for estimating motion blur from local patches. In the future, \nwe are interested in designing a CNN for estimating motion kernels. We are also interested \nin designing a CNN non-uniform motion deblurring method.   \n\n\\section*{Acknowledgement}\nThis report has been prepared for the Boston University Machine Learning course (CS 542), \ntaken over the Summer 2, 2017 semester by the listed authors. It is intended to be used \nin compliance with the requirements of the course. \n\n\\section*{References}\n\\medskip\n\\small\n\n[1] Jian Sun, Wenfei Cao, Zongben Xu, Jean Ponce. Learning a convolutional neural network \nfor non-uniform motion blur removal. CVPR 2015 - IEEE Conference on Computer Vision and \nPattern Recognition 2015, Jun 2015, Boston, United States. IEEE, 2015, .\n\n[2] “Visual Object Classes Challenge 2010 (VOC2010).” The PASCAL Visual Object Classes \nChallenge 2010 (VOC2010), PASCAL, 2010, host.robots.ox.ac.uk/pascal/VOC/voc2010/.\n\n\\end{document}\n"
  },
  {
    "path": "cnn.py",
    "content": "import numpy\nimport cv2\nimport PIL\nimport os, os.path\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom keras.callbacks import EarlyStopping\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\n\nfrom keras.constraints import maxnorm\nfrom keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array\nfrom matplotlib import pyplot\nfrom scipy.misc import toimage\nfrom keras.models import Sequential\nfrom keras.layers import Dropout\nfrom keras import callbacks\nfrom keras.layers import Dense, Flatten\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.optimizers import RMSprop, SGD,Adam\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.utils import np_utils\nfrom keras import backend as k\n\nk.set_image_dim_ordering('th')\n\nseed = 7\nnumpy.random.seed(seed)\ntrainblur_directory = './s_cnn/train/blur/'\ntrainnoblur_directory = './s_cnn/train/no_blur/'\ntestblur_directory = './s_cnn/test/0/blur/'\ntestnoblur_directory = './s_cnn/test/0/no_blur/'\nfilepath = \"./s_cnn/models/\"\n\nnum_classes = 2\n#########################################################\n#loading blurry images\nimg_data_list1=[]\ndata_dir_list1 = os.listdir(trainblur_directory)\n\nimg_list1=os.listdir(trainblur_directory)\nfor img in img_list1:\n\tinput_img=cv2.imread(trainblur_directory +  img )\n\tinput_img=numpy.swapaxes(input_img,0,2)\n\n\timg_data_list1.append(input_img)\n\nimg_data1 = numpy.array(img_data_list1)\nimg_data1 = img_data1.astype('float32')\nimg_data1 /= 255\n\nprint(img_data1.shape)\nnum_of_samples1 = img_data1.shape[0]\nlabels1 = numpy.ones((num_of_samples1,),dtype='int64')\nprint(\"length of labels1 is \"+str(len(labels1)))\nprint(\"labels1 are all \"+str(labels1[10]))\n\n##########################################################\n#laoding none blurry images\n\nimg_data_list2=[]\ndata_dir_list2 = 
os.listdir(trainnoblur_directory)\n\nimg_list2=os.listdir(trainnoblur_directory)\nfor img in img_list2:\n\tinput_img=cv2.imread(trainnoblur_directory +  img )\n\tinput_img = numpy.swapaxes(input_img, 0, 2)\n\timg_data_list2.append(input_img)\n\nimg_data2 = numpy.array(img_data_list2)\nimg_data2 = img_data2.astype('float32')\nimg_data2 /= 255\nprint(img_data2.shape)\n\nnum_of_samples2 = img_data2.shape[0]\nlabels2 = numpy.ones((num_of_samples2,),dtype='int64')\nlabels2[:]=0\nprint(\"length of labels2 is \"+str(len(labels2)))\nprint(\"labels1 are all \"+str(labels2[10]))\n#######################################################\n# Combine the two numpy arrays and shuffle\nlabels=numpy.concatenate((labels1,labels2),axis=0)\nimg_data = numpy.concatenate((img_data1,img_data2),axis=0)\nY = np_utils.to_categorical(labels, num_classes)\n#Shuffle the dataset\nx,y = shuffle(img_data,Y, random_state=2)\n# Split the dataset\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2)\n###########################################################\n# Defining the model\ninput_shape=img_data[0].shape\n\nmodel = Sequential()\nmodel.add(Convolution2D(96, 7,7,input_shape=input_shape))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.2))\nmodel.add(Convolution2D(256, 5, 5))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.2))\n\nmodel.add(Flatten())\nmodel.add(Dense(1024))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(num_classes))\nmodel.add(Activation('softmax'))\n\n###########################\nepochs = 100\nlearning_rate = 0.01\ndecay = learning_rate / epochs\nadam = Adam(lr=learning_rate)\nmodel.compile(loss='categorical_crossentropy', 
optimizer='adam',metrics=[\"accuracy\"])\n\nmodel.summary()\nmodel.get_config()\nmodel.layers[0].get_config()\nmodel.layers[0].input_shape\nmodel.layers[0].output_shape\nmodel.layers[0].get_weights()\nnumpy.shape(model.layers[0].get_weights()[0])\nmodel.layers[0].trainable\n\n# Training\nhist = model.fit(X_train, y_train, batch_size=128, nb_epoch=100, verbose=1, validation_data=(X_test, y_test))\n\nfilename='model_train_new.csv'\ncsv_log=callbacks.CSVLogger(filename, separator=',', append=False)\n\nearly_stopping=callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='min')\n\ncheckpoint = callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n\n# tensorboard callback\ntensorboard_callback = k.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)\n\ncallbacks_list = [csv_log,early_stopping,checkpoint, tensorboard_callback]\n\n\n# Evaluating the model\n\nscore = model.evaluate(X_test, y_test, show_accuracy=True, verbose=0)\nprint('Test Loss:', score[0])\nprint('Test accuracy:', score[1])\n\ntest_image = X_test[0:1]\nprint (test_image.shape)\n\nprint(model.predict(test_image))\nprint(model.predict_classes(test_image))\nprint(y_test[0:1])\n\n# Save our model here\nfile = open(filepath+\"motionblur.h5\", 'a')\nmodel.save(filepath+\"motionblur.h5\")\nfile.close()\n######################################################\n\n"
  },
  {
    "path": "image_slice.py",
    "content": "from PIL import Image\nimg = Image.open(\"/Users/sibozhu/DeepLearning/testing/JR_blur.jpg\")\nprint(img)\n(imageWidth, imageHeight)=img.size\ngridx=60\ngridy=60\nrangex=img.width/gridx\nrangey=img.height/gridy\nprint rangex*rangey\nfor x in xrange(rangex):\n    for y in xrange(rangey):\n        bbox=(x*gridx, y*gridy, x*gridx+gridx, y*gridy+gridy)\n        slice_bit=img.crop(bbox)\n        slice_bit.save('/Users/sibozhu/DeepLearning/testing/ppt/'+str(x)+','+str(y)+'.jpg', optimize=True, bits=6)\n"
  },
  {
    "path": "mergingpatches.py",
    "content": "import numpy as np\nimport cv2\nimport random\nfrom PIL import Image\nimport PIL\nimport os, os.path\nimport numpy as np\nfrom natsort import natsorted\nimport matplotlib.pyplot as plt\n\npatch_dir = \"./testing/\"\ntemp_dir = \"./testing/temp/\"\nresult_dir = \"./testing/result/\"\n\npi_imgs = []\n# cv_imgs = []\nvalid_images = [\".jpg\"]\nfor f in os.listdir(patch_dir):\n    ext = os.path.splitext(f)[1]\n    if ext.lower() not in valid_images:\n        continue\n    pi_imgs.append(Image.open(os.path.join(patch_dir,f)))\n    # cv_imgs.append(cv2.imread(os.path.join(patch_dir,f)))\ntotal = len(pi_imgs)\n\nprint(str(len(pi_imgs))+\" patches in total\")\n\n#######################################################\n\"\"\"Loading all the images from directory\"\"\"\ndir=[]\nvalid_images = [\".jpg\"]\nfor f in os.listdir(patch_dir):\n    ext = os.path.splitext(f)[1]\n    if ext.lower() not in valid_images:\n        continue\n    dir.append(os.path.join(patch_dir,f))\n\n\n##############################################################\n'''\nfor testing here, no influence on global algorithm\n'''\n\n\n# flg = './testing/0_5_9,blur.jpg'\n#\n# im_index = flg.split(\"/\")[-1].split(\",\")[0].split(\"_\")[0]\n# print(\"the image's index: \"+str(im_index))\n#\n# im_width = flg.split(\"/\")[-1].split(\",\")[0].split(\"_\")[1]\n# print(\"the image's width's code: \"+str(im_width))\n#\n# im_height = flg.split(\"/\")[-1].split(\",\")[0].split(\"_\")[2]\n# print(\"the image's height's code: \"+str(im_height))\n\n#######################################\n\"\"\"concat images with numpy\"\"\"\n\ndef concat_img_horizon(list_imgs):\n    imgs = [PIL.Image.open(i) for i in list_imgs]\n    # pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)\n    min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]\n    #\n    imgs_comb = np.hstack((np.asarray(i.resize(min_shape)) for i in imgs))\n    imgs_comb = 
PIL.Image.fromarray( imgs_comb)\n    # imgs_comb.save( './testing/temp/'+str(save_name) +',horizon.jpg' )\n    return imgs_comb\n\ndef concat_img_vertical(list_imgs):\n    imgs = [PIL.Image.open(i) for i in list_imgs]\n    # pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)\n    min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]\n    imgs_comb = np.hstack((np.asarray(i.resize(min_shape)) for i in imgs))\n    # for a vertical stacking it is simple: use vstack\n    imgs_comb = np.vstack((np.asarray(i.resize(min_shape)) for i in imgs))\n    imgs_comb = PIL.Image.fromarray(imgs_comb)\n    # imgs_comb.save( './testing/temp/'+str(save_name) +',vertical.jpg' )\n    return imgs_comb\n\ndef concat_temp_horizon(list_imgs):\n    imgs_comb = np.hstack((np.asarray(i) for i in list_imgs))\n    imgs_comb = PIL.Image.fromarray(imgs_comb)\n    return imgs_comb\n\n\n#########################################################\n'''\nfor testing here, no influence on global algorithm\n'''\n#\n# list_im1 = pic_index[0][:3]\n# list_im2 = pic_index[0][3:6]\n# test1 = concat_img_vertical(list_im1)\n# test2 = concat_img_vertical(list_im2)\n# test3 = concat_temp_horizon(test1,test2)\n# test4 = concat_temp_horizon(test2,test3)\n# plt.imshow(test4)\n# plt.show()\n\n##############################################################\n\n\"\"\"counting number of whole pictures in the folder\"\"\"\nmax_index=0\nfor i in range(len(dir)):\n    flag = int(dir[i].split(\"/\")[-1].split(\",\")[0].split(\"_\")[0])\n    if flag > max_index:\n        max_index = flag\n    # im_width = dir[i].split(\"/\")[-1].split(\",\")[0].split(\"_\")[1]\n    # im_height = dir[i].split(\"/\")[-1].split(\",\")[0].split(\"_\")[2]\n\n#############################################\n\"\"\"placing patches to their certain picture\"\"\"\npic_index={}\nfor elem in range(max_index+1):\n    pic_index[elem]=[]\n\nfor j in range(len(dir)):\n    for k in 
range(len(pic_index)):\n        if int(dir[j].split(\"/\")[-1].split(\",\")[0].split(\"_\")[0]) == k:\n            pic_index[k].append(dir[j])\n\nprint('the first picture contains '+str(len(pic_index[0]))+' patches')\n\n##########################################\n\"\"\"getting the total columns of picture\"\"\"\ndef get_total_column(list):\n    max_col = 0\n    for l in range(len(list)):\n        col_flag = int(pic_index[0][l].split(\"/\")[-1].split(\",\")[0].split(\"_\")[1])\n        if col_flag > max_col:\n            max_col = col_flag\n    return max_col\n\nprint (\"this picture's total column is \"+str(get_total_column(pic_index[0])))\n#########################################\n\"\"\"getting the total rows of picture\"\"\"\ndef get_total_row(list):\n    max_row = 0\n    for o in range(len(list)):\n        row_flag = int(list[o].split(\"/\")[-1].split(\",\")[0].split(\"_\")[2])\n        if row_flag > max_row:\n            max_row = row_flag\n    return max_row\nprint (\"this picture's total row is \" + str(get_total_row(pic_index[0])))\n\n\n########################################\n\"\"\"sorting this picture's patches with order of name\"\"\"\ndef sort_picture_patches(list):\n    return natsorted(list)\n#####################################\n\n\"\"\"doing global merging\"\"\"\n\nfor a in range(len(pic_index)):\n    max_col = get_total_column(pic_index[a])\n    max_row = get_total_row(pic_index[a])\n    pic_index[a] = sort_picture_patches(pic_index[a])\n    col_index = {}\n    for elem in range(max_col + 1):\n        col_index[elem] = []\n\n    for j in range(len(pic_index[a])):\n        for k in range(max_col):\n            if int(pic_index[a][j].split(\"/\")[-1].split(\",\")[0].split(\"_\")[1]) == k:\n                col_index[k].append(pic_index[a][j])\n\n    saver = []\n\n    for i in range(max_col - 1):\n        flag = concat_img_vertical(col_index[i])\n        saver.append(flag)\n\n    res = concat_temp_horizon(saver)\n    res.save(result_dir + str(a)+ 
'.jpg')\n\n"
  },
  {
    "path": "motionblur.py",
    "content": "import cv2\nimport numpy as np\n\nimg = cv2.imread('/Users/sibozhu/DeepLearning/testing/JR.jpg')\n\n\nsize = 15\n\n# generating the kernel\nkernel_motion_blur = np.zeros((size, size))\nkernel_motion_blur[int((size-1)/2), :] = np.ones(size)\nkernel_motion_blur = kernel_motion_blur / size\n\n# applying the kernel to the input image\noutput = cv2.filter2D(img, -1, kernel_motion_blur)\n\n\ncv2.imwrite('/Users/sibozhu/DeepLearning/testing/JR_blur.jpg',output)\n\n"
  },
  {
    "path": "processing_utils.py",
    "content": "'''\nutility function package for image processing\nby Sibo Zhu, Kieran Xiao Wang\n2017.08.24\n'''\nimport numpy as np\nimport cv2\nimport random\nfrom PIL import Image\nimport PIL\nfrom natsort import natsorted\nimport os, os.path\n\ndef save_image(image_np_array, image_save_path):\n    '''\n    !!! Sibo, Please complete this function !!!\n    :param image_np_array: 3-D numpy array\n    :param image_save_path: string\n    :return:\n    '''\n    im = Image.fromarray(image_np_array)\n    im.save(image_save_path)\n\n\ndef patch_merge_to_one_from_folder(patch_dir):\n    '''\n    merge patches into a whole image\n    !!! Sibo, please complete this function !!!\n    !!! now you are using naming for indicating patch location and blur/no blur, which is fine\n    !!! the patch_dir is supposed to contain all patches of an image(no matter blured or not)\n    !!! you may want to use os.walk\n    !!! if so avoid to have any other .jpg file except image patches(e.g. do not save the whole image in .jpg in that folder)\n    :param patch_dir: [string] directory to the folder that contains all patches(of an image)\n    :return: [3-d array] whole image in np array\n    '''\n    patch_dir = patch_dir\n\n    pi_imgs = []\n    valid_images = [\".jpg\"]\n    for f in os.listdir(patch_dir):\n        ext = os.path.splitext(f)[1]\n        if ext.lower() not in valid_images:\n            continue\n        pi_imgs.append(Image.open(os.path.join(patch_dir, f)))\n    total = len(pi_imgs)\n\n    print(str(len(pi_imgs)) + \" patches in total\")\n\n    #######################################################\n    \"\"\"Loading all the images from directory\"\"\"\n    dir = []\n    valid_images = [\".jpg\"]\n    for f in os.listdir(patch_dir):\n        ext = os.path.splitext(f)[1]\n        if ext.lower() not in valid_images:\n            continue\n        dir.append(os.path.join(patch_dir, f))\n\n    #####################################################\n    \"\"\"concat images with 
numpy\"\"\"\n\n    def concat_img_horizon(list_imgs):\n        imgs = [PIL.Image.open(i) for i in list_imgs]\n        # pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)\n        min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]\n        #\n        imgs_comb = np.hstack((np.asarray(i.resize(min_shape)) for i in imgs))\n        imgs_comb = PIL.Image.fromarray(imgs_comb)\n        return imgs_comb\n\n    def concat_img_vertical(list_imgs):\n        imgs = [PIL.Image.open(i) for i in list_imgs]\n        # pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)\n        min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]\n        imgs_comb = np.hstack((np.asarray(i.resize(min_shape)) for i in imgs))\n        # for a vertical stacking it is simple: use vstack\n        imgs_comb = np.vstack((np.asarray(i.resize(min_shape)) for i in imgs))\n        imgs_comb = PIL.Image.fromarray(imgs_comb)\n        return imgs_comb\n\n    def concat_temp_horizon(list_imgs):\n        imgs_comb = np.hstack((np.asarray(i) for i in list_imgs))\n        imgs_comb = PIL.Image.fromarray(imgs_comb)\n        return imgs_comb\n\n    #########################################################\n\n\n    \"\"\"counting number of whole pictures in the folder\"\"\"\n    max_index = 0\n    for i in range(len(dir)):\n        flag = int(dir[i].split(\"/\")[-1].split(\",\")[0].split(\"_\")[0])\n        if flag > max_index:\n            max_index = flag\n\n    #############################################\n    \"\"\"placing patches to their certain picture\"\"\"\n    pic_index = {}\n    for elem in range(max_index + 1):\n        pic_index[elem] = []\n\n    for j in range(len(dir)):\n        for k in range(len(pic_index)):\n            if int(dir[j].split(\"/\")[-1].split(\",\")[0].split(\"_\")[0]) == k:\n                pic_index[k].append(dir[j])\n\n    print('the first 
picture contains ' + str(len(pic_index[0])) + ' patches')\n\n    ##########################################\n    \"\"\"getting the total columns of picture\"\"\"\n\n    def get_total_column(list):\n        max_col = 0\n        for l in range(len(list)):\n            col_flag = int(pic_index[0][l].split(\"/\")[-1].split(\",\")[0].split(\"_\")[1])\n            if col_flag > max_col:\n                max_col = col_flag\n        return max_col\n\n    print (\"this picture's total column is \" + str(get_total_column(pic_index[0])))\n    #########################################\n    \"\"\"getting the total rows of picture\"\"\"\n\n    def get_total_row(list):\n        max_row = 0\n        for o in range(len(list)):\n            row_flag = int(list[o].split(\"/\")[-1].split(\",\")[0].split(\"_\")[2])\n            if row_flag > max_row:\n                max_row = row_flag\n        return max_row\n\n    print (\"this picture's total row is \" + str(get_total_row(pic_index[0])))\n\n    ########################################\n    \"\"\"sorting this picture's patches with order of name\"\"\"\n\n    def sort_picture_patches(list):\n        return natsorted(list)\n\n    #####################################\n\n    \"\"\"doing global merging\"\"\"\n\n    for a in range(len(pic_index)):\n        max_col = get_total_column(pic_index[a])\n        max_row = get_total_row(pic_index[a])\n        pic_index[a] = sort_picture_patches(pic_index[a])\n        col_index = {}\n        for elem in range(max_col + 1):\n            col_index[elem] = []\n\n        for j in range(len(pic_index[a])):\n            for k in range(max_col):\n                if int(pic_index[a][j].split(\"/\")[-1].split(\",\")[0].split(\"_\")[1]) == k:\n                    col_index[k].append(pic_index[a][j])\n\n        saver = []\n\n        for i in range(max_col - 1):\n            flag = concat_img_vertical(col_index[i])\n            saver.append(flag)\n\n        res = concat_temp_horizon(saver)\n        img = 
PIL.Image.open(res).convert(\"L\")\n        arr = np.array(img)\n    return arr\n\ndef mass_patch_merge_to_one_from_folder(patch_dir,save_dir):\n    '''\n    After implementing the merging patches back to a whole image,\n    we can also do that same thing to a folder that contains several patches that\n    come from different images and merge and save them back to those original images (with partially\n    blurry) based on the naming habit of slicing images.\n    :param patch_dir: [string] directory to the folder that contains all patches(of an image)\n    :param save_dir: [string] directory to the folder that used to save all those merged images\n    :return: This time there's no return\n    '''\n    patch_dir = patch_dir\n    result_dir = save_dir\n\n    pi_imgs = []\n    valid_images = [\".jpg\"]\n    for f in os.listdir(patch_dir):\n        ext = os.path.splitext(f)[1]\n        if ext.lower() not in valid_images:\n            continue\n        pi_imgs.append(Image.open(os.path.join(patch_dir, f)))\n    total = len(pi_imgs)\n\n    print(str(len(pi_imgs)) + \" patches in total\")\n\n    #######################################################\n    \"\"\"Loading all the images from directory\"\"\"\n    dir = []\n    valid_images = [\".jpg\"]\n    for f in os.listdir(patch_dir):\n        ext = os.path.splitext(f)[1]\n        if ext.lower() not in valid_images:\n            continue\n        dir.append(os.path.join(patch_dir, f))\n\n\n    #####################################################\n    \"\"\"concat images with numpy\"\"\"\n\n    def concat_img_horizon(list_imgs):\n        imgs = [PIL.Image.open(i) for i in list_imgs]\n        # pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)\n        min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]\n        #\n        imgs_comb = np.hstack((np.asarray(i.resize(min_shape)) for i in imgs))\n        imgs_comb = PIL.Image.fromarray(imgs_comb)\n       
 return imgs_comb\n\n    def concat_img_vertical(list_imgs):\n        imgs = [PIL.Image.open(i) for i in list_imgs]\n        # pick the image which is the smallest, and resize the others to match it (can be arbitrary image shape here)\n        min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]\n        imgs_comb = np.hstack((np.asarray(i.resize(min_shape)) for i in imgs))\n        # for a vertical stacking it is simple: use vstack\n        imgs_comb = np.vstack((np.asarray(i.resize(min_shape)) for i in imgs))\n        imgs_comb = PIL.Image.fromarray(imgs_comb)\n        return imgs_comb\n\n    def concat_temp_horizon(list_imgs):\n        imgs_comb = np.hstack((np.asarray(i) for i in list_imgs))\n        imgs_comb = PIL.Image.fromarray(imgs_comb)\n        return imgs_comb\n\n    #########################################################\n\n\n    \"\"\"counting number of whole pictures in the folder\"\"\"\n    max_index = 0\n    for i in range(len(dir)):\n        flag = int(dir[i].split(\"/\")[-1].split(\",\")[0].split(\"_\")[0])\n        if flag > max_index:\n            max_index = flag\n\n    #############################################\n    \"\"\"placing patches to their certain picture\"\"\"\n    pic_index = {}\n    for elem in range(max_index + 1):\n        pic_index[elem] = []\n\n    for j in range(len(dir)):\n        for k in range(len(pic_index)):\n            if int(dir[j].split(\"/\")[-1].split(\",\")[0].split(\"_\")[0]) == k:\n                pic_index[k].append(dir[j])\n\n    print('the first picture contains ' + str(len(pic_index[0])) + ' patches')\n\n    ##########################################\n    \"\"\"getting the total columns of picture\"\"\"\n\n    def get_total_column(list):\n        max_col = 0\n        for l in range(len(list)):\n            col_flag = int(pic_index[0][l].split(\"/\")[-1].split(\",\")[0].split(\"_\")[1])\n            if col_flag > max_col:\n                max_col = col_flag\n        return max_col\n\n    
print (\"this picture's total column is \" + str(get_total_column(pic_index[0])))\n    #########################################\n    \"\"\"getting the total rows of picture\"\"\"\n\n    def get_total_row(list):\n        max_row = 0\n        for o in range(len(list)):\n            row_flag = int(list[o].split(\"/\")[-1].split(\",\")[0].split(\"_\")[2])\n            if row_flag > max_row:\n                max_row = row_flag\n        return max_row\n\n    print (\"this picture's total row is \" + str(get_total_row(pic_index[0])))\n\n    ########################################\n    \"\"\"sorting this picture's patches with order of name\"\"\"\n\n    def sort_picture_patches(list):\n        return natsorted(list)\n\n    #####################################\n\n    \"\"\"doing global merging\"\"\"\n\n    for a in range(len(pic_index)):\n        max_col = get_total_column(pic_index[a])\n        max_row = get_total_row(pic_index[a])\n        pic_index[a] = sort_picture_patches(pic_index[a])\n        col_index = {}\n        for elem in range(max_col + 1):\n            col_index[elem] = []\n\n        for j in range(len(pic_index[a])):\n            for k in range(max_col):\n                if int(pic_index[a][j].split(\"/\")[-1].split(\",\")[0].split(\"_\")[1]) == k:\n                    col_index[k].append(pic_index[a][j])\n\n        saver = []\n\n        for i in range(max_col - 1):\n            flag = concat_img_vertical(col_index[i])\n            saver.append(flag)\n\n        res = concat_temp_horizon(saver)\n        res.save(result_dir + str(a) + '.jpg')\n\n\n\ndef image_to_patch(image_path, patch_size, patch_dir):\n    '''\n    cut an image into patch with certain size\n    !!! Sibo, please complete this function !!!\n\n    :param image_path: [string] path to the image(e.g. ./whole_image.jpg)\n    :param patch_size: [tuple] i.g. 
(30(length),30(width))\n    :param patch_dir: [string] dir where to save the patch dir.(patch dir is defined to be the folder that contains all\n    image patches of an image)\n    :return:\n    '''\n    img = Image.open(image_path)\n    (imageWidth, imageHeight) = img.size\n    gridx = patch_size\n    gridy = patch_size\n    rangex = img.width / gridx\n    rangey = img.height / gridy\n    print rangex * rangey\n    for x in xrange(rangex):\n        for y in xrange(rangey):\n            bbox = (x * gridx, y * gridy, x * gridx + gridx, y * gridy + gridy)\n            slice_bit = img.crop(bbox)\n            slice_bit.save(patch_dir + str(x) + '_' + str(y) + '.jpg', optimize=True,\n                           bits=6)\n            print(patch_dir + str(x) + '_' + str(y) + '.jpg')\n    print(imageWidth)\n\n\ndef directory_to_patch(patch_size,original_path,no_blur_path,blur_path,all_img_path):\n    '''\n    We take a directory that contains several whole pictures and cut them into custom size of patches,\n    then apply 50% chance blur and non-blur to those patches, save them into blurry folder, non-blurry folder,\n    and a folder that contains all blurry and non-blurry patches with order.\n    :param patch_size: [integer] The custom patch size we want, e.g:for 30x30 patch, enter '30'\n    :param original_path: [string] The original path that contains all the original pictures without any modification\n    :param no_blur_path: [string] The destination path that contains all the non-blurry patches with order\n    :param blur_path: [string] The destination path that contains all the blurry patches with order\n    :param all_img_path: [string] The destination path that contains all the patches with order\n    :return: There's no return in this function, all the modified patches are saved into the destination path\n    '''\n    #motion blur preset\n    size = 15\n    gridx = patch_size\n    gridy = patch_size\n    kernel_motion_blur = np.zeros((size, size))\n    
kernel_motion_blur[int((size - 1) / 2), :] = np.ones(size)\n    kernel_motion_blur = kernel_motion_blur / size\n\n    # go through every image in source folder\n    print('begin loading images')\n    pi_imgs = []\n    cv_imgs = []\n    valid_images = [\".jpg\"]\n    for f in os.listdir(original_path):\n        ext = os.path.splitext(f)[1]\n        if ext.lower() not in valid_images:\n            continue\n        pi_imgs.append(Image.open(os.path.join(original_path, f)))\n        cv_imgs.append(cv2.imread(os.path.join(original_path, f)))\n    print('finished loading images')\n    #\n\n    # looping to create blurry and non-blurry images in 50% chance\n    for i in range(len(pi_imgs)):\n        img = pi_imgs[i]\n        (imageWidth, imageHeight) = img.size\n\n        rangex = imageWidth / gridx\n        rangey = imageHeight / gridy\n        for x in xrange(rangex):\n            for y in xrange(rangey):\n\n                bbox = (x * gridx, y * gridy, x * gridx + gridx, y * gridy + gridy)\n                slice_bit = img.crop(bbox)\n                if random.randrange(2) == 0:\n                    slice_bit.save(no_blur_path + str(i) + '_' + str(x) + '_' + str(y) + ',noblur.jpg', optimize=True,\n                                   bits=6)\n                    slice_bit.save(all_img_path + str(i) + '_' + str(x) + '_' + str(y) + ',noblur.jpg', optimize=True,\n                                   bits=6)\n                    print(str(i))\n                else:\n                    slice_bit.save(blur_path + str(i) + '_' + str(x) + '_' + str(y) + ',blur.jpg', optimize=True, bits=6)\n                    img1 = cv2.imread(blur_path + str(i) + '_' + str(x) + '_' + str(y) + ',blur.jpg')\n                    output = cv2.filter2D(img1, -1, kernel_motion_blur)\n                    cv2.imwrite(blur_path + str(i) + '_' + str(x) + '_' + str(y) + ',blur.jpg', output)\n                    cv2.imwrite(all_img_path + str(i) + '_' + str(x) + '_' + str(y) + ',blur.jpg', output)\n         
           print(str(i))\n\n"
  },
  {
    "path": "slicing.py",
    "content": "from PIL import Image\nsave_path = \"./testing/\"\n\nimg = Image.open(\"/Users/sibozhu/DeepLearning/testing/JR.jpg\")\n(imageWidth, imageHeight)=img.size\ngridx=30\ngridy=30\nrangex=imageWidth/gridx\nprint('Width: '+str(rangex))\nrangey=imageHeight/gridy\nprint('Height: '+str(rangey))\nprint str(rangex*rangey) + \" patches in total\"\nfor x in xrange(rangex):\n    for y in xrange(rangey):\n        bbox=(x*gridx, y*gridy, x*gridx+gridx, y*gridy+gridy)\n        slice_bit=img.crop(bbox)\n        slice_bit.save(save_path+str(x)+'_'+str(y)+'.jpg', optimize=True, bits=6)\n        # print (save_path+str(x)+'_'+str(y)+'.jpg')\nprint imageWidth\n\n"
  },
  {
    "path": "test_img_generator.py",
    "content": "import numpy as np\r\nimport cv2\r\nimport random\r\nfrom PIL import Image\r\nimport os, os.path\r\n\r\n#\r\n# NOTE: this function creates new partly blured images to tarpath folder\r\n#       and return a 3D-list labels which indicates blured parts in each image\r\n#\r\n# NOTE: please enter your correct folder path\r\n# NOTE: transpath is just a temp path that stores the blured copies of images\r\n# NOTE: Oripath is the source path and tarpath is where all the outputs are.\r\noripath = \"./s_oridata/\"\r\ntranspath = \"./transimg/\"\r\ntarpath = \"./tarimg/\"\r\n\r\n# global variables\r\nsize = 15\r\n\r\nlabels = []\r\n\r\ngridx = 30\r\ngridy = 30\r\n\r\nkernel_motion_blur = np.zeros((size, size))\r\nkernel_motion_blur[int((size-1)/2), :] = np.ones(size)\r\nkernel_motion_blur = kernel_motion_blur / size\r\n\r\n# go through every image in source folder\r\npi_imgs = []\r\ncv_imgs = []\r\nvalid_images = [\".jpg\"]\r\nfor f in os.listdir(oripath):\r\n    ext = os.path.splitext(f)[1]\r\n    if ext.lower() not in valid_images:\r\n        continue\r\n    pi_imgs.append(Image.open(os.path.join(oripath,f)))\r\n    cv_imgs.append(cv2.imread(os.path.join(oripath,f)))\r\n\r\n\r\n# looping to create blured images\r\nfor i in range(len(pi_imgs)):\r\n    # creating a blured copy of the original image\r\n    temp = cv_imgs[i]\r\n    blured = cv2.filter2D(temp, -1, kernel_motion_blur)\r\n    cv2.imwrite(transpath+str(i)+\".jpg\",blured)\r\n    \r\n    # reload both original and blured version\r\n    img1 = pi_imgs[i]\r\n    img2 = Image.open(transpath+str(i)+\".jpg\")\r\n    \r\n    # cut the image into 30*30 pieces \r\n    # and randomly choose half of the pieces to blur\r\n    (imageWidth, imageHeight)=img1.size\r\n    rangex=int(img1.width/gridx)\r\n    rangey=int(img1.height/gridy)\r\n    t_parts = rangex*rangey\r\n    \r\n    # creating labels that identifies which part is blured\r\n    label = []\r\n    for j in range(rangex):\r\n        label += 
[[0]*rangey]\r\n    \r\n    \r\n    for k in range(int(t_parts/2)):\r\n        x = random.randint(0,rangex-1)\r\n        y = random.randint(0,rangey-1)\r\n        label[x][y] = 1\r\n        \r\n        box = (x*gridx,y*gridy,x*gridx+gridx,y*gridy+gridy)\r\n        sliced = img2.crop(box)\r\n            \r\n    \r\n        img1.paste(sliced, (x*gridx,y*gridy))\r\n        \r\n    img1.save(tarpath + str(i)+\".jpg\")\r\n    \r\n    labels += [label]\r\n\r\n\r\n\r\n    \r\n    \r\n    \r\n    \r\n    \r\n"
  },
  {
    "path": "trainingdatacreate.py",
    "content": "import numpy as np\nimport cv2\nimport random\nfrom PIL import Image\nimport os, os.path\n\noripath = \"./s_oridata/\"\nnoblurpath = \"./s_cnn/train/no_blur/\"\nblurpath = \"./s_cnn/train/blur/\"\nallimgpath = \"./s_cnn/train/inputdata/\"\n\nsize = 15\ngridx=30\ngridy=30\n\nkernel_motion_blur = np.zeros((size, size))\nkernel_motion_blur[int((size-1)/2), :] = np.ones(size)\nkernel_motion_blur = kernel_motion_blur / size\n\n# img = Image.open(oripath+\"JR.jpg\")\n\n\n#go through every image in source folder\nprint('begin loading images')\npi_imgs = []\ncv_imgs = []\nvalid_images = [\".jpg\"]\nfor f in os.listdir(oripath):\n    ext = os.path.splitext(f)[1]\n    if ext.lower() not in valid_images:\n        continue\n    pi_imgs.append(Image.open(os.path.join(oripath,f)))\n    cv_imgs.append(cv2.imread(os.path.join(oripath,f)))\nprint('finished loading images')\n#\n\n#\n# looping to create blurry and non-blurry images in 50% chance\nfor i in range(len(pi_imgs)):\n    img = pi_imgs[i]\n    (imageWidth, imageHeight) = img.size\n\n    rangex = imageWidth / gridx\n    rangey = imageHeight / gridy\n    for x in xrange(rangex):\n        for y in xrange(rangey):\n\n            bbox = (x * gridx, y * gridy, x * gridx + gridx, y * gridy + gridy)\n            slice_bit = img.crop(bbox)\n            if random.randrange(2) == 0:\n                slice_bit.save(noblurpath + str(i)+'_'+ str(x) + '_' + str(y) + ',noblur.jpg', optimize=True, bits=6)\n                slice_bit.save(allimgpath + str(i)+'_'+ str(x) + '_' + str(y) + ',noblur.jpg', optimize=True,bits=6)\n                print(str(i))\n            else:\n                slice_bit.save(blurpath + str(i)+'_'+ str(x) + '_' + str(y) + ',blur.jpg', optimize=True, bits=6)\n                img1 = cv2.imread(blurpath + str(i)+'_'+ str(x) + '_' + str(y) + ',blur.jpg')\n                output = cv2.filter2D(img1, -1, kernel_motion_blur)\n                cv2.imwrite(blurpath + str(i)+'_'+ str(x) + '_' + str(y) + 
',blur.jpg', output)\n                cv2.imwrite(allimgpath + str(i)+'_'+ str(x) + '_' + str(y) + ',blur.jpg', output)\n                print(str(i))\n\n\n\n\n\n\n"
  }
]