[
  {
    "path": ".gitignore",
    "content": "*.h5\n*.txt\n"
  },
  {
    "path": "README.md",
    "content": "# Fine-grained Dog Classification competition\n- This is a dog classification competition held by Baidu. Further information at http://js.baidu.com/\n\n## Framework\n- [Keras](https://keras.io/)\n- [Tensorflow Backend](https://www.tensorflow.org/)\n\n## Hardware\n- Geforce GTX TITANX 12G\n- Intel® Core™ i7-6700 CPU\n- Memory 16G\n- Operating system Ubuntu 14.04\n\n## Data\n- Download the images from Baidu Cloud\n  - Training set: http://pan.baidu.com/s/1slLOqBz Key: 5axb\n  - Test set: http://pan.baidu.com/s/1gfaf9rt Key: fl5n\n- Put the images into different directories by their class labels. Refer to [altoFolders.py](preprocessing/altoFolders.py) for doing this.\n- Take 20% of the labeled data for validation. Refer to [divforValidation.py](preprocessing/divforValidation.py).\n\n## Base Model\n- [VGG19](models/vgg19.py) for deep feature extraction, which is provided in keras models.\n- [InceptionV3](models/inceptionV3.py) for deep feature extraction, which is provided in keras models.\n- Softmax for classification.\n\n## Evaluate\n- Predict the classes for unlabeled data one by one referring to [predict_onebyone.py](evaluate/predict_onebyone.py) and by generator referring to [predict_bygenerator.py](evaluate/predict_bygenerator.py).\n\n## To be continued\n> Feel free to contact me if you have any issues or better ideas about anything.\n\n> by Holy"
  },
  {
    "path": "data/README.md",
    "content": "# Data\n\nData for train, validation and test.\n\nNote that the validation set is 20% of the original data. (Random sampling from different classes.)"
  },
  {
    "path": "data/test/README.md",
    "content": "# Test\n\nData for testing.\n\n"
  },
  {
    "path": "data/train/README.md",
    "content": "# Train\n\nData for training.\n\n"
  },
  {
    "path": "data/val/README.md",
    "content": "# Validation\n\nData for validation.\n\n"
  },
  {
    "path": "evaluate/README.md",
    "content": "# Evaluate\n\nSome evaluation methods to be added."
  },
  {
    "path": "evaluate/predict_bygenerator.py",
    "content": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 10 21:27:15 2017\n@author: Administrator\n\"\"\"\nimport os\nimport shutil\nimport cv2\nimport h5py\nimport numpy as np\nimport pandas as pd\nmodel_path=\"../models/model_dogs_Xception.h5\"\ntest_data_dir=\"../data/test\"\nval_data_dir=\"../data/val\"\nmodel=load_model(model_path)\ntest_datagen = ImageDataGenerator(rescale=1./255)\nbatch_size=64\n#for generate class_indices\nval_generator = test_datagen.flow_from_directory(\n    val_data_dir,\n    target_size=(299, 299),\n    batch_size=batch_size,\n    shuffle=False,\n    class_mode='categorical'\n)\nlabel_idxs = sorted(valid_generator.class_indices.items(), key=operator.itemgetter(1))\ntest_generator = test_datagen.flow_from_directory(\n        test_data_dir,\n        target_size=(299, 299),\n        batch_size=batch_size,\n        shuffle=False,\n        class_mode='categorical')\n\ny= model.predict_generator(test_generator, test_generator.samples/batch_size + 1)\ny_max_idx = np.argmax(y1, 1)\npredict_path = 'submission.txt'\n\nwith open(predict_path,'a') as fp:\n    for i, idx in enumerate(y_max_idx):\n        fp.write(str(label_idxs[idx][0]) + '\\t' + test_generator.filenames[i][6:-4] + '\\n')\n    fp.close"
  },
  {
    "path": "evaluate/predict_onebyone.py",
    "content": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 10 21:27:15 2017\n@author: Administrator\n\"\"\"\nimport os\nimport shutil\nimport cv2\nimport h5py\nimport numpy as np\nimport pandas as pd\nmodel_path=model_dogs_Xception.h5\nmodel=load_model(model_path)\ntest_filenames=os.listdir(test_data_dir)\ntest_img=[]\npredictions=[]\nfor file_path in test_filenames:\n    img=cv2.imread(test_data_dir+file_path)\n    img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n    img=cv2.resize(img,(299,299),interpolation=cv2.INTER_CUBIC)\n    test_img.append(img)\n    test_img=np.array(test_img)\n    test_img = test_img.astype('float32')\n    test_img/=255\n    pre=model.predict(test_img,batch_size=1)[0]\n    predictions.append(pre)\n    test_img=[]\n    \nprobs=np.array(predictions)\nclasses=np.argmax(probs,1)\n    \n#clsses-labels\nclass_indices=np.load('class_indices.txt.npy')\n#(int,int)\nclass_indices=class_indices.tolist()\n#(int,str)\nvalue_indices={v:k for k,v in class_indices.items()}\ntrue_class=[]\nfor i in range(len(classes)):\n   true_class.append(value_indices[classes[i]])\n\nwith open('submit.txt','a') as fp:\n    for i in range(len(test_filenames)):\n        fp.write(str(true_class[i])+\"\\t\"+str(test_filenames[i].split(\".\")[0])+'\\n')\n    fp.close"
  },
  {
    "path": "models/README.md",
    "content": "# Models\n\nSome models to be added."
  },
  {
    "path": "models/dogs.py",
    "content": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 10 20:49:09 2017\n@author: Administrator\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport h5py\nfrom keras.models import Sequential, Model, load_model\nfrom keras import applications\nfrom keras import optimizers\nfrom keras.layers import Dropout, Flatten, Dense, Input,GlobalAveragePooling2D\nfrom keras.utils import vis_utils,plot_model\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import ModelCheckpoint,ReduceLROnPlateau\nfrom vgg19 import VGG19\n\n\n#模型的构建\nimg_rows, img_cols, img_channel = 400, 400, 3\nbase_model = VGG19(weights='imagenet', include_top=False,input_shape=(img_rows, img_cols, img_channel))\nadd_model = Sequential()\nadd_model.add(Flatten(input_shape=base_model.output_shape[1:]))\nadd_model.add(Dense(1024, activation='relu'))\nadd_model.add(Dense(100, activation='softmax'))\nmodel = Model(inputs=base_model.input, outputs=add_model(base_model.output))\nmodel.compile(loss='categorical_crossentropy', optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),\n              metrics=['accuracy'])\n#冻结某些层设置\n#for layer in model.layers[:85]:\n #   layer.trainable = False\n#打印网络结构\n#model.summary()\nfor i, layer in enumerate(base_model.layers):\n   print(i, layer.name)\n#参数设置\nbatch_size = 32\nepochs = 50\ntrain_data_dir=\"data/train\"\nval_data_dir=\"data/val\"\n\n#plot_model(model,to_file='model.png')\ntrain_datagen = ImageDataGenerator(\n        rotation_range=30, \n        rescale=1./255,\n        shear_range=0.2,\n        zoom_range=0.2,\n        width_shift_range=0.1,\n        height_shift_range=0.1, \n        horizontal_flip=True)\n\t\t\nval_datagen=ImageDataGenerator(rescale=1./255)\n\t\t\nval_generator = val_datagen.flow_from_directory(\n        val_data_dir,\n        target_size=(img_rows, img_cols),\n        batch_size=batch_s,\n        class_mode='categorical')\n\ntrain_generator = train_datagen.flow_from_directory(\n        train_data_dir,\n        target_size=(img_rows, img_cols),\n        batch_size=batch_size,\n        class_mode='categorical')\n#保存映射表\n#with h5py.File(\"class_indices.h5\") as h:\n    #h.create_dataset(\"class_indices\",data=train_generator.class_indices)\n#np.save('class_indices.txt', train_generator.class_indices)\n\nhistory = model.fit_generator(\n     train_generator,\n     steps_per_epoch=train_generator.samples/batch_size,\n     epochs=epochs,\n\t validation_data=val_generator,\n     validation_steps=batch_s\n     #callbacks=[ModelCheckpoint('VGG16-transferlearning.model', monitor='val_acc', save_best_only=True)]\n )\nmodel.save('model_dogs_VGG19_400*400_full.h5') \n\n"
  },
  {
    "path": "models/inceptionV3.py",
    "content": "import os\nimport keras\nimport numpy as np\nfrom keras import Input\nfrom keras import backend as K\nfrom keras.applications import Xception,InceptionV3\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom keras.layers import Dense, Dropout, Lambda,AveragePooling2D,Flatten\nfrom keras.models import Model, load_model\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import SGD\nfrom keras.utils import plot_model\n\nimg_rows,img_cols,img_channel=299,299,3\ntrain_datagen = ImageDataGenerator(\n        rotation_range=30, \n        rescale=1./255,\n        shear_range=0.3,\n        zoom_range=0.2,\n        width_shift_range=0.2,\n        height_shift_range=0.2, \n        horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\nearly_stopping = EarlyStopping(monitor='val_loss', patience=3)\nauto_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=0, mode='auto', epsilon=0.001, cooldown=0, min_lr=0)\nsave_model = ModelCheckpoint('InceptionV3{epoch:02d}-{val_acc:.2f}.h5', period=2)\n\n# create the base pre-trained model\ninput_tensor = Input(shape=(img_rows, img_cols, img_channel))\nbase_model = InceptionV3(weights='imagenet', include_top=False,input_shape=(img_rows, img_cols, img_channel))\nx=AveragePooling2D(pool_size=(4,4))(base_model.output)\nx=Dropout(0.5)(x)\nx=Flatten()(x)\nx=Dense(100,activation='softmax')(x)\n#base_model\nmodel = Model(inputs=base_model.input, outputs=x)\n\nfor layer in base_model.layers:\n    layer.trainable = False\n\nbatch_size = 48\nepoch=5\ntrain_generator = train_datagen.flow_from_directory(\n    '../data/train',\n    target_size=(img_rows, img_cols),\n    batch_size=batch_size,\n    class_mode='categorical')\n\nvalidation_generator = test_datagen.flow_from_directory(\n    '../data/val',\n    target_size=(img_rows, img_cols),\n    batch_size=batch_size,\n    class_mode='categorical')\n\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=SGD(lr=1e-3, momentum=0.9), metrics=['accuracy'])\n# model = make_parallel(model, 3)\n# train fc \nmodel.fit_generator(train_generator,\n                steps_per_epoch=train_generator.samples/batch_size+1,\n                epochs=epoch,\n                validation_data=validation_generator,\n                validation_steps=validation_generator.samples/batch_size+1\n                #callbacks=[early_stopping, auto_lr, save_model]\n                )\n\n\n# train all\t\t\t\t\nfor layer in model.layers:\n    layer.trainable = True  \nmodel.summary()          \nbatch_size=24\nepoch=30\ntrain_generator = train_datagen.flow_from_directory(\n\t'../data/train',\n\ttarget_size=(img_rows, img_cols),\n\tbatch_size=batch_size,\n\tclass_mode='categorical')\n\nvalidation_generator = test_datagen.flow_from_directory(\n\t'../data/val',\n\ttarget_size=(img_rows, img_cols),\n\tbatch_size=batch_size,\n\tclass_mode='categorical')\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=SGD(lr=1e-4, momentum=0.9), metrics=['accuracy'])\nsave_model = ModelCheckpoint('InceptionV3-{epoch:02d}-{val_acc:.2f}.h5', period=2)\nmodel.fit_generator(train_generator,\n                steps_per_epoch=train_generator.samples/batch_size+1,\n                epochs=epoch,\n                validation_data=validation_generator,\n                validation_steps=validation_generator.samples/batch_size+1,\n                callbacks=[auto_lr, save_model]) # otherwise the generator would loop indefinitely\nmodel.save('inceptionV3.h5')"
  },
  {
    "path": "models/vgg19.py",
    "content": "# -*- coding: utf-8 -*-\n'''VGG19 model for Keras.\n# Reference:\n- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)\n'''\nfrom __future__ import print_function\n\nimport numpy as np\nimport warnings\nfrom keras.models import Model\nfrom keras.layers import Flatten, Dense, Input\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import GlobalMaxPooling2D\nfrom keras.layers import GlobalAveragePooling2D\nfrom keras.preprocessing import image\nfrom keras.utils import layer_utils\nfrom keras.utils.data_utils import get_file\nfrom keras import backend as K\nfrom keras.applications.imagenet_utils import decode_predictions\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom keras.applications.imagenet_utils import _obtain_input_shape\nfrom keras.engine.topology import get_source_inputs\n\nWEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels.h5'\nWEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\ndef VGG19(include_top=True, weights='imagenet',\n          input_tensor=None, input_shape=None,\n          pooling=None,\n          classes=1000):\n    \"\"\"Instantiates the VGG19 architecture.\n    Optionally loads weights pre-trained\n    on ImageNet. Note that when using TensorFlow,\n    for best performance you should set\n    `image_data_format=\"channels_last\"` in your Keras config\n    at ~/.keras/keras.json.\n    The model and the weights are compatible with both\n    TensorFlow and Theano. The data format\n    convention used by the model is the one\n    specified in your Keras config file.\n    # Arguments\n        include_top: whether to include the 3 fully-connected\n            layers at the top of the network.\n        weights: one of `None` (random initialization)\n            or \"imagenet\" (pre-training on ImageNet).\n        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)\n            to use as image input for the model.\n        input_shape: optional shape tuple, only to be specified\n            if `include_top` is False (otherwise the input shape\n            has to be `(224, 224, 3)` (with `channels_last` data format)\n            or `(3, 224, 244)` (with `channels_first` data format).\n            It should have exactly 3 inputs channels,\n            and width and height should be no smaller than 48.\n            E.g. `(200, 200, 3)` would be one valid value.\n        pooling: Optional pooling mode for feature extraction\n            when `include_top` is `False`.\n            - `None` means that the output of the model will be\n                the 4D tensor output of the\n                last convolutional layer.\n            - `avg` means that global average pooling\n                will be applied to the output of the\n                last convolutional layer, and thus\n                the output of the model will be a 2D tensor.\n            - `max` means that global max pooling will\n                be applied.\n        classes: optional number of classes to classify images\n            into, only to be specified if `include_top` is True, and\n            if no `weights` argument is specified.\n    # Returns\n        A Keras model instance.\n    # Raises\n        ValueError: in case of invalid argument for `weights`,\n            or invalid input shape.\n    \"\"\"\n    if weights not in {'imagenet', None}:\n        raise ValueError('The `weights` argument should be either '\n                         '`None` (random initialization) or `imagenet` '\n                         '(pre-training on ImageNet).')\n\n    if weights == 'imagenet' and include_top and classes != 1000:\n        raise ValueError('If using `weights` as imagenet with `include_top`'\n                         ' as true, `classes` should be 1000')\n    # Determine proper input shape\n    input_shape = _obtain_input_shape(input_shape,\n                                      default_size=224,\n                                      min_size=48,\n                                      data_format=K.image_data_format(),\n                                      include_top=include_top)\n\n    if input_tensor is None:\n        img_input = Input(shape=input_shape)\n    else:\n        if not K.is_keras_tensor(input_tensor):\n            img_input = Input(tensor=input_tensor, shape=input_shape)\n        else:\n            img_input = input_tensor\n    # Block 1\n    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)\n    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)\n    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n    # Block 2\n    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)\n    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)\n    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n    # Block 3\n    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)\n    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)\n    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)\n    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)\n    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n    # Block 4\n    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)\n    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)\n    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)\n    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)\n    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n    # Block 5\n    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)\n    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)\n    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)\n    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x)\n    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)\n\n    if include_top:\n        # Classification block\n        x = Flatten(name='flatten')(x)\n        x = Dense(4096, activation='relu', name='fc1')(x)\n        x = Dense(4096, activation='relu', name='fc2')(x)\n        x = Dense(classes, activation='softmax', name='predictions')(x)\n    else:\n        if pooling == 'avg':\n            x = GlobalAveragePooling2D()(x)\n        elif pooling == 'max':\n            x = GlobalMaxPooling2D()(x)\n\n    # Ensure that the model takes into account\n    # any potential predecessors of `input_tensor`.\n    if input_tensor is not None:\n        inputs = get_source_inputs(input_tensor)\n    else:\n        inputs = img_input\n    # Create model.\n    model = Model(inputs, x, name='vgg19')\n\n    # load weights\n    if weights == 'imagenet':\n        if include_top:\n            weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5',\n                                    WEIGHTS_PATH,\n                                    cache_subdir='models')\n        else:\n            weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',\n                                    WEIGHTS_PATH_NO_TOP,\n                                    cache_subdir='models')\n        model.load_weights(weights_path)\n        if K.backend() == 'theano':\n            layer_utils.convert_all_kernels_in_model(model)\n\n        if K.image_data_format() == 'channels_first':\n            if include_top:\n                maxpool = model.get_layer(name='block5_pool')\n                shape = maxpool.output_shape[1:]\n                dense = model.get_layer(name='fc1')\n                layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\n\n            if K.backend() == 'tensorflow':\n                warnings.warn('You are using the TensorFlow backend, yet you '\n                              'are using the Theano '\n                              'image data format convention '\n                              '(`image_data_format=\"channels_first\"`). '\n                              'For best performance, set '\n                              '`image_data_format=\"channels_last\"` in '\n                              'your Keras config '\n                              'at ~/.keras/keras.json.')\n    return model\n\n\nif __name__ == '__main__':\n    model = VGG19(include_top=True, weights='imagenet')\n\n    img_path = 'cat.jpg'\n    img = image.load_img(img_path, target_size=(224, 224))\n    x = image.img_to_array(img)\n    x = np.expand_dims(x, axis=0)\n    x = preprocess_input(x)\n    print('Input image shape:', x.shape)\n\n    preds = model.predict(x)\n    print('Predicted:', decode_predictions(preds))"
  },
  {
    "path": "preprocessing/README.md",
    "content": "# Preprocessing\n\nSome preprocessing to be added."
  },
  {
    "path": "preprocessing/altoFolders.py",
    "content": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 10 19:15:14 2017\n@author: holy\n\"\"\"\nimport os\nimport shutil\nimport pandas as pd\nimport numpy as np\nlabel = pd.read_csv(\"originaldata.txt\",sep='\\s+',encoding='utf-8',escapechar='\\n')\ntrain_filenames=os.listdir('originaldata')\ndef ex_mkdir(dirname):\n    if not os.path.exists(dirname):\n        os.mkdir(dirname) \ndef rmrf_mkdir(dirname):\n    if os.path.exists(dirname):\n        shutil.rmtree(dirname)\n    os.mkdir(dirname)      \nex_mkdir('../data/train')\nfor iter in label.index:\n    name=label.iloc[iter,0]\n    i=label.iloc[iter,1]\n    ex_mkdir('train2/'+str(i))\n    shutil.copy('originaldata'+name+'.jpg', '../data/train/'+str(i)+'/'+name+'.jpg')"
  },
  {
    "path": "preprocessing/divforValidation.py",
    "content": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 10 19:15:14 2017\n@author: holy\n\"\"\"\nfilename ='../data/train'\ntrain_dir='../data/train'\nval_dir='../data/val'\nls=os.listdir(filename)\ndef ex_mkdir(dirname):\n    if not os.path.exists(dirname):\n        os.mkdir(dirname) \ndef rmrf_mkdir(dirname):\n    if os.path.exists(dirname):\n        shutil.rmtree(dirname)\n    os.mkdir(dirname)      \nex_mkdir(val_dir)\nfor i in range(0,len(ls)):\n    ex_mkdir(val_dir+str(ls[i]))\n    data=os.listdir(train_dir+str(ls[i]))\n    for j in range(0,int(0.2*len(data))):#%20 for validation\n        name=data[j]\n        shutil.move(train_dir+str(ls[i])+'/'+name,val_dir+str(ls[i])+'/'+name)"
  }
]