Full Code of lsh1994/keras-segmentation for AI

Repository: lsh1994/keras-segmentation
Branch: master
Commit: 24d2859c2be7
Files: 13
Total size: 32.8 KB

Directory structure:
keras-segmentation/

├── .gitignore
├── LoadBatches.py
├── Models/
│   ├── FCN32.py
│   ├── FCN8.py
│   ├── SegNet.py
│   ├── UNet.py
│   ├── utils.py
│   └── utils_cpu.py
├── predict.py
├── readme.md
├── readme_zh.md
├── train.py
└── visualizeDataset.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitignore
================================================
/data
/__pycache__
/.idea
/test
/output
/Models/__pycache__


================================================
FILE: LoadBatches.py
================================================
import numpy as np
import cv2
import glob
import itertools
import matplotlib.pyplot as plt
import random


def getImageArr(im):

    img = im.astype(np.float32)
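    # Subtract the per-channel ImageNet means in BGR order (cv2.imread returns BGR),
    # matching VGG16's Caffe-style preprocessing.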

    img[:, :, 0] -= 103.939
    img[:, :, 1] -= 116.779
    img[:, :, 2] -= 123.68

    return img


def getSegmentationArr(seg, nClasses, input_height, input_width):
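    # Build a one-hot label array and flatten it to (input_height * input_width, nClasses),
    # matching the Reshape((-1, nClasses)) + softmax output of the models in Models/.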

    seg_labels = np.zeros((input_height, input_width, nClasses))

    for c in range(nClasses):
        seg_labels[:, :, c] = (seg == c).astype(int)

    seg_labels = np.reshape(seg_labels, (-1, nClasses))
    return seg_labels


def imageSegmentationGenerator(images_path, segs_path, batch_size,
                               n_classes, input_height, input_width):

    assert images_path[-1] == '/'
    assert segs_path[-1] == '/'

    images = sorted(glob.glob(images_path + "*.jpg") +
                    glob.glob(images_path + "*.png") + glob.glob(images_path + "*.jpeg"))

    segmentations = sorted(glob.glob(segs_path + "*.jpg") +
                           glob.glob(segs_path + "*.png") + glob.glob(segs_path + "*.jpeg"))

    zipped = itertools.cycle(zip(images, segmentations))
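    # Images and masks are paired by sorted filename and cycled endlessly,
    # so the generator never raises StopIteration.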

    while True:
        X = []
        Y = []
        for _ in range(batch_size):
            im, seg = zipped.__next__()
            im = cv2.imread(im, 1)
            seg = cv2.imread(seg, 0)

            assert im.shape[:2] == seg.shape[:2]

            assert im.shape[0] >= input_height and im.shape[1] >= input_width

            xx = random.randint(0, im.shape[0] - input_height)
            yy = random.randint(0, im.shape[1] - input_width)

            im = im[xx:xx + input_height, yy:yy + input_width]
            seg = seg[xx:xx + input_height, yy:yy + input_width]

            X.append(getImageArr(im))
            Y.append(
                getSegmentationArr(
                    seg,
                    n_classes,
                    input_height,
                    input_width))

        yield np.array(X), np.array(Y)


if __name__ == '__main__':
    G = imageSegmentationGenerator("data/dataset1/images_prepped_train/",
                                   "data/dataset1/annotations_prepped_train/", batch_size=16, n_classes=15, input_height=320, input_width=320)
    x, y = G.__next__()
    print(x.shape, y.shape)


================================================
FILE: Models/FCN32.py
================================================
from keras.applications import vgg16
from keras.models import Model, Sequential
from keras.layers import Conv2D, Conv2DTranspose, Input, Cropping2D, add, Dropout, Reshape, Activation
from keras.utils import plot_model


def FCN32(nClasses, input_height, input_width):

    assert input_height % 32 == 0
    assert input_width % 32 == 0

    img_input = Input(shape=(input_height, input_width, 3))

    model = vgg16.VGG16(
        include_top=False,
        weights='imagenet', input_tensor=img_input)
    assert isinstance(model, Model)
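    # FCN-32s: replace VGG16's fully connected layers with 7x7/1x1 convolutions (fc6/fc7),
    # predict per-class scores, then upsample 32x in a single transposed convolution.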

    o = Conv2D(filters=4096, kernel_size=(7, 7), padding="same",
               activation="relu", name="fc6")(model.output)
    o = Dropout(rate=0.5)(o)
    o = Conv2D(filters=4096, kernel_size=(1, 1), padding="same",
               activation="relu", name="fc7")(o)
    o = Dropout(rate=0.5)(o)

    o = Conv2D(filters=nClasses, kernel_size=(1, 1), padding="same", activation="relu", kernel_initializer="he_normal",
               name="score_fr")(o)

    o = Conv2DTranspose(filters=nClasses, kernel_size=(32, 32), strides=(32, 32), padding="valid", activation=None,
                        name="score2")(o)

    o = Reshape((-1, nClasses))(o)
    o = Activation("softmax")(o)

    fcn32 = Model(inputs=img_input, outputs=o)
    # fcn32.summary()
    return fcn32


if __name__ == '__main__':
    m = FCN32(15, 320, 320)
    m.summary()
    plot_model(m, show_shapes=True, to_file='model_fcn32.png')
    print(len(m.layers))


================================================
FILE: Models/FCN8.py
================================================
from keras.applications import vgg16
from keras.models import Model, Sequential
from keras.layers import Conv2D, Conv2DTranspose, Input, Cropping2D, add, Dropout, Reshape, Activation


def FCN8_helper(nClasses, input_height, input_width):

    assert input_height % 32 == 0
    assert input_width % 32 == 0

    img_input = Input(shape=(input_height, input_width, 3))

    model = vgg16.VGG16(
        include_top=False,
        weights='imagenet', input_tensor=img_input,
        pooling=None,
        classes=1000)
    assert isinstance(model, Model)

    o = Conv2D(filters=4096, kernel_size=(7, 7), padding="same",
               activation="relu", name="fc6")(model.output)
    o = Dropout(rate=0.5)(o)
    o = Conv2D(filters=4096, kernel_size=(1, 1), padding="same",
               activation="relu", name="fc7")(o)
    o = Dropout(rate=0.5)(o)

    o = Conv2D(filters=nClasses, kernel_size=(1, 1), padding="same", activation="relu", kernel_initializer="he_normal",
               name="score_fr")(o)

    o = Conv2DTranspose(filters=nClasses, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None,
                        name="score2")(o)

    fcn8 = Model(inputs=img_input, outputs=o)
    # fcn8.summary()
    return fcn8


def FCN8(nClasses, input_height, input_width):

    fcn8 = FCN8_helper(nClasses, input_height, input_width)
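    # FCN-8s: fuse the 2x-upsampled score map with 1x1-conv predictions from pool4 and
    # pool3, then upsample 8x back to the input resolution.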

    # Conv to be applied on Pool4
    skip_con1 = Conv2D(nClasses, kernel_size=(1, 1), padding="same", activation=None, kernel_initializer="he_normal",
                       name="score_pool4")(fcn8.get_layer("block4_pool").output)
    Summed = add(inputs=[skip_con1, fcn8.output])

    x = Conv2DTranspose(nClasses, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None,
                        name="score4")(Summed)

    ###
    skip_con2 = Conv2D(nClasses, kernel_size=(1, 1), padding="same", activation=None, kernel_initializer="he_normal",
                       name="score_pool3")(fcn8.get_layer("block3_pool").output)
    Summed2 = add(inputs=[skip_con2, x])

    #####
    Up = Conv2DTranspose(nClasses, kernel_size=(8, 8), strides=(8, 8),
                         padding="valid", activation=None, name="upsample")(Summed2)

    Up = Reshape((-1, nClasses))(Up)
    Up = Activation("softmax")(Up)

    mymodel = Model(inputs=fcn8.input, outputs=Up)

    return mymodel


if __name__ == '__main__':
    m = FCN8(15, 320, 320)
    from keras.utils import plot_model
    plot_model(m, show_shapes=True, to_file='model_fcn8.png')
    print(len(m.layers))


================================================
FILE: Models/SegNet.py
================================================
"""
@author: LiShiHang
@software: PyCharm
@file: SegNet.py
@time: 2018/12/18 14:58
"""
from keras import Model,layers
from keras.layers import Input,Conv2D,BatchNormalization,Activation,Reshape

from Models.utils import MaxUnpooling2D,MaxPoolingWithArgmax2D

def SegNet(nClasses, input_height, input_width):

    assert input_height % 32 == 0
    assert input_width % 32 == 0

    img_input = Input(shape=( input_height, input_width,3))

    # Block 1
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv1')(img_input)
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv2')(x)
    x, mask_1 = MaxPoolingWithArgmax2D(name='block1_pool')(x)

    # Block 2
    x = layers.Conv2D(128, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv1')(x)
    x = layers.Conv2D(128, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv2')(x)
    x , mask_2 = MaxPoolingWithArgmax2D(name='block2_pool')(x)

    # Block 3
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv1')(x)
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv2')(x)
    x = layers.Conv2D(256, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block3_conv3')(x)
    x, mask_3 = MaxPoolingWithArgmax2D(name='block3_pool')(x)

    # Block 4
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv1')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv2')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block4_conv3')(x)

    x, mask_4 = MaxPoolingWithArgmax2D(name='block4_pool')(x)

    # Block 5
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv1')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv2')(x)
    x = layers.Conv2D(512, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block5_conv3')(x)
    x, mask_5 = MaxPoolingWithArgmax2D(name='block5_pool')(x)

    Vgg_streamlined=Model(inputs=img_input,outputs=x)

    # Load pretrained VGG16 weights (note: hard-coded local path; point it at your own copy of the notop weights file)
    Vgg_streamlined.load_weights(r"E:\Code\PycharmProjects\keras-segmentation\data\vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5")

    # Decoder layers
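    # Each decoder stage unpools with the argmax indices saved by the matching encoder
    # pooling layer (SegNet's key idea), then refines the result with Conv + BN + ReLU.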
    unpool_1 = MaxUnpooling2D()([x, mask_5])
    y = Conv2D(512, (3,3), padding="same")(unpool_1)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)
    y = Conv2D(512, (3, 3), padding="same")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)
    y = Conv2D(512, (3, 3), padding="same")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    unpool_2 = MaxUnpooling2D()([y, mask_4])
    y = Conv2D(512, (3, 3), padding="same")(unpool_2)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)
    y = Conv2D(512, (3, 3), padding="same")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)
    y = Conv2D(256, (3, 3), padding="same")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    unpool_3 = MaxUnpooling2D()([y, mask_3])
    y = Conv2D(256, (3, 3), padding="same")(unpool_3)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)
    y = Conv2D(256, (3, 3), padding="same")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)
    y = Conv2D(128, (3, 3), padding="same")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    unpool_4 = MaxUnpooling2D()([y, mask_2])
    y = Conv2D(128, (3, 3), padding="same")(unpool_4)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)
    y = Conv2D(64, (3, 3), padding="same")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    unpool_5 = MaxUnpooling2D()([y, mask_1])
    y = Conv2D(64, (3, 3), padding="same")(unpool_5)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    y = Conv2D(nClasses, (1, 1), padding="same")(y)
    y = BatchNormalization()(y)
    y = Activation("relu")(y)

    y = Reshape((-1, nClasses))(y)
    y = Activation("softmax")(y)

    model=Model(inputs=img_input,outputs=y)
    return model



if __name__ == '__main__':
    m = SegNet(15,320, 320)
    # print(m.get_weights()[2])  # check whether the weights changed (used to verify VGG weight loading)
    from keras.utils import plot_model
    plot_model(m, show_shapes=True, to_file='model_segnet.png')
    print(len(m.layers))
    m.summary()


================================================
FILE: Models/UNet.py
================================================
"""
@author: LiShiHang
@software: PyCharm
@file: UNet.py
@time: 2018/12/27 16:54
@desc:
"""
from keras import Model, layers
from keras.applications import vgg16
from keras.layers import Input, Conv2D, BatchNormalization, Activation, Reshape, MaxPool2D, concatenate, UpSampling2D


def UNet(nClasses, input_height, input_width):
    assert input_height % 32 == 0
    assert input_width % 32 == 0

    img_input = Input(shape=(input_height, input_width, 3))

    vgg_streamlined = vgg16.VGG16(
        include_top=False,
        weights='imagenet', input_tensor=img_input)
    assert isinstance(vgg_streamlined, Model)
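    # VGG16 acts as the encoder; the decoder below upsamples and concatenates the
    # corresponding encoder pool outputs (U-Net-style skip connections).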

    # Decoder layers
    o = UpSampling2D((2, 2))(vgg_streamlined.output)
    o = concatenate([vgg_streamlined.get_layer(
        name="block4_pool").output, o], axis=-1)
    o = Conv2D(512, (3, 3), padding="same")(o)
    o = BatchNormalization()(o)

    o = UpSampling2D((2, 2))(o)
    o = concatenate([vgg_streamlined.get_layer(
        name="block3_pool").output, o], axis=-1)
    o = Conv2D(256, (3, 3), padding="same")(o)
    o = BatchNormalization()(o)

    o = UpSampling2D((2, 2))(o)
    o = concatenate([vgg_streamlined.get_layer(
        name="block2_pool").output, o], axis=-1)
    o = Conv2D(128, (3, 3), padding="same")(o)
    o = BatchNormalization()(o)

    o = UpSampling2D((2, 2))(o)
    o = concatenate([vgg_streamlined.get_layer(
        name="block1_pool").output, o], axis=-1)
    o = Conv2D(64, (3, 3), padding="same")(o)
    o = BatchNormalization()(o)

    # The original U-Net mirror-pads its input, so its output is 2x smaller than the input;
    # here we simply upsample once more to restore the original size.
    o = UpSampling2D((2, 2))(o)
    o = Conv2D(64, (3, 3), padding="same")(o)
    o = BatchNormalization()(o)

    o = Conv2D(nClasses, (1, 1), padding="same")(o)
    o = BatchNormalization()(o)
    o = Activation("relu")(o)

    o = Reshape((-1, nClasses))(o)
    o = Activation("softmax")(o)

    model = Model(inputs=img_input, outputs=o)
    return model


if __name__ == '__main__':
    m = UNet(15, 320, 320)
    # print(m.get_weights()[2])  # check whether the weights changed (used to verify VGG weight loading)
    from keras.utils import plot_model
    plot_model(m, show_shapes=True, to_file='model_unet.png')
    print(len(m.layers))
    m.summary()


================================================
FILE: Models/utils.py
================================================
"""
@author: LiShiHang
@software: PyCharm
@file: utils.py
@time: 2018/12/18 14:58
"""
from keras.engine import Layer
import keras.backend as K


class MaxPoolingWithArgmax2D(Layer):

    def __init__(
            self,
            pool_size=(2, 2),
            strides=(2, 2),
            padding='same',
            **kwargs):
        super(MaxPoolingWithArgmax2D, self).__init__(**kwargs)
        self.padding = padding
        self.pool_size = pool_size
        self.strides = strides

    def call(self, inputs, **kwargs):
        padding = self.padding
        pool_size = self.pool_size
        strides = self.strides
        if K.backend() == 'tensorflow':
            ksize = [1, pool_size[0], pool_size[1], 1]
            padding = padding.upper()
            strides = [1, strides[0], strides[1], 1]
            output, argmax = K.tf.nn.max_pool_with_argmax(
                inputs,
                ksize=ksize,
                strides=strides,
                padding=padding)
        else:
            errmsg = '{} backend is not supported for layer {}'.format(
                K.backend(), type(self).__name__)
            raise NotImplementedError(errmsg)
        argmax = K.cast(argmax, K.floatx())
        return [output, argmax]

    def compute_output_shape(self, input_shape):
        ratio = (1, 2, 2, 1)
        output_shape = [
            dim // ratio[idx]
            if dim is not None else None
            for idx, dim in enumerate(input_shape)]
        output_shape = tuple(output_shape)
        return [output_shape, output_shape]

    def compute_mask(self, inputs, mask=None):
        return 2 * [None]


class MaxUnpooling2D(Layer):
    def __init__(self, up_size=(2, 2), **kwargs):
        super(MaxUnpooling2D, self).__init__(**kwargs)
        self.up_size = up_size

    def call(self, inputs, output_shape=None):

        updates, mask = inputs[0], inputs[1]
        with K.tf.variable_scope(self.name):
            mask = K.cast(mask, 'int32')
            input_shape = K.tf.shape(updates, out_type='int32')
            # calculate the new output shape
            if output_shape is None:
                output_shape = (
                    input_shape[0],
                    input_shape[1] * self.up_size[0],
                    input_shape[2] * self.up_size[1],
                    input_shape[3])

            # compute the indices for batch, height, width and feature maps
            one_like_mask = K.ones_like(mask, dtype='int32')
            batch_shape = K.concatenate(
                [[input_shape[0]], [1], [1], [1]],
                axis=0)
            batch_range = K.reshape(
                K.tf.range(output_shape[0], dtype='int32'),
                shape=batch_shape)
            b = one_like_mask * batch_range
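            # Assumption: the flattened argmax follows the GPU kernel of
            # max_pool_with_argmax in TF 1.x, i.e. (y * width + x) * channels + c
            # per image (no batch offset), so rows and columns are decoded directly below.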
            y = mask // (output_shape[2] * output_shape[3])
            x = (mask // output_shape[3]) % output_shape[2]
            feature_range = K.tf.range(output_shape[3], dtype='int32')
            f = one_like_mask * feature_range

            # transpose indices & reshape update values to one dimension
            updates_size = K.tf.size(updates)
            indices = K.transpose(K.reshape(
                K.stack([b, y, x, f]),
                [4, updates_size]))
            values = K.reshape(updates, [updates_size])
            ret = K.tf.scatter_nd(indices, values, output_shape)
            return ret

    def compute_output_shape(self, input_shape):
        mask_shape = input_shape[1]
        return (
            mask_shape[0],
            mask_shape[1] * self.up_size[0],
            mask_shape[2] * self.up_size[1],
            mask_shape[3]
        )


if __name__ == '__main__':

    import keras
    import numpy as np

    # input = keras.layers.Input((4, 4, 3))
    # o = MaxPoolingWithArgmax2D()(input)
    # model = keras.Model(inputs=input, outputs=o)  # outputs=o
    # model.compile(optimizer="adam", loss='categorical_crossentropy')
    # x = np.random.randint(0, 100, (3, 4, 4, 3)) # debug here
    # m = model.predict(x) # debug here
    # print(m)

    input = keras.layers.Input((4, 4, 3))
    o = MaxPoolingWithArgmax2D()(input)
    o2 = MaxUnpooling2D()(o)
    model = keras.Model(inputs=input, outputs=o2)  # outputs=o
    model.compile(optimizer="adam", loss='categorical_crossentropy')
    x = np.random.randint(0, 100, (3, 4, 4, 3))  # debug here
    m = model.predict(x)  # debug here
    print(m)


================================================
FILE: Models/utils_cpu.py
================================================
"""
@author: LiShiHang
@software: PyCharm
@file: utils_cpu.py
@time: 2018/12/18 14:58
"""
from keras.engine import Layer
import keras.backend as K
from keras.utils import plot_model

class MaxPoolingWithArgmax2D(Layer):

    def __init__(
            self,
            pool_size=(2, 2),
            strides=(2, 2),
            padding='same',
            **kwargs):
        super(MaxPoolingWithArgmax2D, self).__init__(**kwargs)
        self.padding = padding
        self.pool_size = pool_size
        self.strides = strides

    def call(self, inputs, **kwargs):
        padding = self.padding
        pool_size = self.pool_size
        strides = self.strides
        if K.backend() == 'tensorflow':
            ksize = [1, pool_size[0], pool_size[1], 1]
            padding = padding.upper()
            strides = [1, strides[0], strides[1], 1]
            output, argmax = K.tf.nn.max_pool_with_argmax(
                inputs,
                ksize=ksize,
                strides=strides,
                padding=padding)
        else:
            errmsg = '{} backend is not supported for layer {}'.format(
                K.backend(), type(self).__name__)
            raise NotImplementedError(errmsg)
        argmax = K.cast(argmax, K.floatx())
        return [output, argmax]

    def compute_output_shape(self, input_shape):
        ratio = (1, 2, 2, 1)
        output_shape = [
            dim // ratio[idx]
            if dim is not None else None
            for idx, dim in enumerate(input_shape)]
        output_shape = tuple(output_shape)
        return [output_shape, output_shape]

    def compute_mask(self, inputs, mask=None):
        return 2 * [None]


class MaxUnpooling2D(Layer):
    def __init__(self, up_size=(2, 2), **kwargs):
        super(MaxUnpooling2D, self).__init__(**kwargs)
        self.up_size = up_size

    def call(self, inputs, output_shape=None):
        updates, mask = inputs[0], inputs[1]
        with K.tf.variable_scope(self.name):
            mask = K.cast(mask, 'int32')
            input_shape = K.tf.shape(updates, out_type='int32')
            # calculate the new output shape
            if output_shape is None:
                output_shape = (
                    input_shape[0],
                    input_shape[1] * self.up_size[0],
                    input_shape[2] * self.up_size[1],
                    input_shape[3])

            # compute the indices for batch, height, width and feature maps
            one_like_mask = K.ones_like(mask, dtype='int32')
            batch_shape = K.concatenate(
                [[input_shape[0]], [1], [1], [1]],
                axis=0)
            batch_range = K.reshape(
                K.tf.range(output_shape[0], dtype='int32'),
                shape=batch_shape)
            b = one_like_mask * batch_range
            # y = mask // (output_shape[2] * output_shape[3])
            feature_range = K.tf.range(output_shape[3], dtype='int32')
            f = one_like_mask * feature_range
            y = (mask - f) // (output_shape[2] * output_shape[3]) - b * output_shape[1]
            x = (mask // output_shape[3]) % output_shape[2]
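            # Assumption: this variant treats the argmax as including the batch offset,
            # i.e. ((b * height + y) * width + x) * channels + c (as the CPU kernel of
            # max_pool_with_argmax appears to produce), hence the extra
            # "- b * output_shape[1]" correction compared with Models/utils.py.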

            # transpose indices & reshape update values to one dimension
            updates_size = K.tf.size(updates)
            indices = K.transpose(K.reshape(
                K.stack([b, y, x, f]),
                [4, updates_size]))
            # indices_return = K.reshape(indices, (input_shape[0], 12, 4))
            # indices_return = K.cast(indices_return, 'float32')
            values = K.reshape(updates, [updates_size])
            ret = K.tf.scatter_nd(indices, values, output_shape)
            # ret = K.cast(ret, 'float32')
            return ret

    def compute_output_shape(self, input_shape):
        mask_shape = input_shape[1]
        return (
            mask_shape[0],
            mask_shape[1] * self.up_size[0],
            mask_shape[2] * self.up_size[1],
            mask_shape[3]
        )


if __name__ == '__main__':

    import keras
    import numpy as np

    # input = keras.layers.Input((4, 4, 3))
    # o = MaxPoolingWithArgmax2D()(input)
    # model = keras.Model(inputs=input, outputs=o)  # outputs=o
    # model.compile(optimizer="adam", loss='categorical_crossentropy')
    # x = np.random.randint(0, 100, (3, 4, 4, 3)) # debug here
    # m = model.predict(x) # debug here
    # print(m)

    input = keras.layers.Input((4, 4, 3))
    o = MaxPoolingWithArgmax2D()(input)
    o2 = MaxUnpooling2D()(o)
    model = keras.Model(inputs=input, outputs=o2)  # outputs=o
    model.compile(optimizer="adam", loss='categorical_crossentropy')
    x = np.random.randint(0, 100, (3, 4, 4, 3))  # debug here
    m = model.predict(x)  # debug here
    print(m)

================================================
FILE: predict.py
================================================
import LoadBatches
from keras.models import load_model
from Models import FCN32, FCN8, SegNet, UNet
import glob
import cv2
import numpy as np
import random

n_classes = 11

key = "unet"

method = {
    "fcn32": FCN32.FCN32,
    "fcn8": FCN8.FCN8,
    "segnet": SegNet.SegNet,
    'unet': UNet.UNet}

images_path = "data/dataset1/images_prepped_test/"
segs_path = "data/dataset1/annotations_prepped_test/"

input_height = 320
input_width = 320

colors = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
          for _ in range(n_classes)]

##########################################################################


def label2color(colors, n_classes, seg):
    seg_color = np.zeros((seg.shape[0], seg.shape[1], 3))
    for c in range(n_classes):
        seg_color[:, :, 0] += ((seg == c) *
                               (colors[c][0])).astype('uint8')
        seg_color[:, :, 1] += ((seg == c) *
                               (colors[c][1])).astype('uint8')
        seg_color[:, :, 2] += ((seg == c) *
                               (colors[c][2])).astype('uint8')
    seg_color = seg_color.astype(np.uint8)
    return seg_color


def getcenteroffset(shape, input_height, input_width):
    short_edge = min(shape[:2])
    xx = int((shape[0] - short_edge) / 2)
    yy = int((shape[1] - short_edge) / 2)
    return xx, yy


images = sorted(glob.glob(images_path + "*.jpg") +
                glob.glob(images_path + "*.png") +
                glob.glob(images_path + "*.jpeg"))
segmentations = sorted(glob.glob(segs_path + "*.jpg") +
                       glob.glob(segs_path + "*.png") + glob.glob(segs_path + "*.jpeg"))


# m = load_model("output/%s_model.h5" % key)
m = method[key](n_classes, input_height, input_width)  # models with custom layers cannot be loaded directly via load_model
m.load_weights("output/%s_model.h5" % key)
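# Alternative (untested sketch): register the custom layers so the whole model can be
# loaded with load_model instead of rebuilding it and calling load_weights, e.g. for SegNet:
#   from Models.utils import MaxPoolingWithArgmax2D, MaxUnpooling2D
#   m = load_model("output/%s_model.h5" % key,
#                  custom_objects={"MaxPoolingWithArgmax2D": MaxPoolingWithArgmax2D,
#                                  "MaxUnpooling2D": MaxUnpooling2D})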

for i, (imgName, segName) in enumerate(zip(images, segmentations)):

    print("%d/%d %s" % (i + 1, len(images), imgName))

    im = cv2.imread(imgName, 1)
    # im=cv2.resize(im,(input_height,input_width))
    xx, yy = getcenteroffset(im.shape, input_height, input_width)
    im = im[xx:xx + input_height, yy:yy + input_width, :]

    seg = cv2.imread(segName, 0)
    # seg= cv2.resize(seg,interpolation=cv2.INTER_NEAREST)
    seg = seg[xx:xx + input_height, yy:yy + input_width]

    pr = m.predict(np.expand_dims(LoadBatches.getImageArr(im), 0))[0]
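    # The model outputs flattened (H*W, n_classes) softmax scores; reshape them back to
    # the image grid and take the per-pixel argmax to obtain a label map.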
    pr = pr.reshape((input_height, input_width, n_classes)).argmax(axis=2)

    cv2.imshow("img", im)
    cv2.imshow("seg_predict_res", label2color(colors, n_classes, pr))
    cv2.imshow("seg", label2color(colors, n_classes, seg))

    cv2.waitKey()


================================================
FILE: readme.md
================================================
<h1 align="center"><a href="https://github.com/lsh1994/keras-segmentation" target="_blank">keras-segmentation</a></h1>

<p align="center">
These are not the papers' original implementations. The purpose of this project is to get started with semantic segmentation and master the basic workflow.
</p>

<font color=red>FCN32/8, SegNet and U-Net [models published](https://github.com/lsh1994/keras-segmentation/releases). Thank you for your support.</font>

[中文说明](readme_zh.md)

## Environment
  
Item     | Value | Item     | Value
:---: | :---: | :---: | :---: 
keras | 2.2.4  | OS | win10
tensorflow-gpu | 1.10/1.12 | Python| 3.6.7

## Reference
  
https://github.com/divamgupta/image-segmentation-keras  
https://github.com/ykamikawa/SegNet  
  
Data:  
https://drive.google.com/file/d/0B0d9ZiqAgFkiOHR1NTJhWVJMNEU/view?usp=sharing  
Data or:  
https://github.com/alexgkendall/SegNet-Tutorial/tree/master/CamVid

## Project Structure  
![在这里插入图片描述](https://img-blog.csdnimg.cn/20181218212010847.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L25pbWExOTk0,size_16,color_FFFFFF,t_70)

`python visualizeDataset.py`: visualize samples  
![在这里插入图片描述](https://img-blog.csdnimg.cn/20181113165336706.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L25pbWExOTk0,size_16,color_FFFFFF,t_70)

`python train.py`: run training    
`python predict.py`: run prediction  

You can modify the parameters in the project to switch models, or clone a historical version.
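
For example, the model is selected by the `key` variable in `train.py` and `predict.py`; the snippet below mirrors the code in `train.py`:

```python
# Registered models (see train.py / predict.py)
method = {"fcn32": FCN32.FCN32, "fcn8": FCN8.FCN8,
          "segnet": SegNet.SegNet, "unet": UNet.UNet}

key = "segnet"  # one of "fcn32", "fcn8", "segnet", "unet"
m = method[key](n_classes, input_height=input_height, input_width=input_width)
```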

## About

### FCN32

Visualization results:    
![在这里插入图片描述](https://img-blog.csdnimg.cn/2018111410255134.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L25pbWExOTk0,size_16,color_FFFFFF,t_70)

### FCN8

Visualization results:   
![在这里插入图片描述](https://img-blog.csdnimg.cn/20181114103306961.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L25pbWExOTk0,size_16,color_FFFFFF,t_70)


### SegNet

### U-Net


  

================================================
FILE: readme_zh.md
================================================
<h1 align="center"><a href="https://github.com/lsh1994/keras-segmentation" target="_blank">keras-segmentation</a></h1>

<p align="center">
Not the papers' original implementations. The purpose of this project is to get started with semantic segmentation and master the basic workflow.
</p>

<font color=red>Models for FCN32/8, SegNet and U-Net have been published; see the [corresponding releases](https://github.com/lsh1994/keras-segmentation/releases).</font>

## Environment

  
Item     | Value | Item     | Value
:---: | :---: | :---: | :---: 
keras | 2.2.4  | OS | win10
tensorflow-gpu | 1.10/1.12 | Python| 3.6.7

## Reference

https://github.com/divamgupta/image-segmentation-keras  
https://github.com/ykamikawa/SegNet  
  
Data:  
https://drive.google.com/file/d/0B0d9ZiqAgFkiOHR1NTJhWVJMNEU/view?usp=sharing  
Data (alternative):  
https://github.com/alexgkendall/SegNet-Tutorial/tree/master/CamVid

## Project Structure  
![在这里插入图片描述](https://img-blog.csdnimg.cn/20181218212010847.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L25pbWExOTk0,size_16,color_FFFFFF,t_70)

`python visualizeDataset.py`: visualize samples:
![在这里插入图片描述](https://img-blog.csdnimg.cn/20181113165336706.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L25pbWExOTk0,size_16,color_FFFFFF,t_70)

`python train.py`: run training  
`python predict.py`: run prediction  
Modify the relevant parameters in the code to switch models, or clone a historical version.

## Details

### FCN32

[Reference article](https://blog.csdn.net/nima1994/article/details/84031759)  
Visualization results:  
![在这里插入图片描述](https://img-blog.csdnimg.cn/2018111410255134.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L25pbWExOTk0,size_16,color_FFFFFF,t_70)

### FCN8

[Reference article](https://blog.csdn.net/nima1994/article/details/84062253)  
Visualization results:  
![在这里插入图片描述](https://img-blog.csdnimg.cn/20181114103306961.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L25pbWExOTk0,size_16,color_FFFFFF,t_70)


### SegNet
[Reference article](https://blog.csdn.net/nima1994/article/details/85079510)  

### U-Net
[Reference article](https://blog.csdn.net/nima1994/article/details/86300172)


  

================================================
FILE: train.py
================================================
from keras.callbacks import ModelCheckpoint, TensorBoard

import LoadBatches
from Models import FCN8, FCN32, SegNet, UNet
from keras import optimizers
import math

#############################################################################
train_images_path = "data/dataset1/images_prepped_train/"
train_segs_path = "data/dataset1/annotations_prepped_train/"
train_batch_size = 8
n_classes = 11

epochs = 500

input_height = 320
input_width = 320


val_images_path = "data/dataset1/images_prepped_test/"
val_segs_path = "data/dataset1/annotations_prepped_test/"
val_batch_size = 8

key = "unet"


##################################

method = {
    "fcn32": FCN32.FCN32,
    "fcn8": FCN8.FCN8,
    'segnet': SegNet.SegNet,
    'unet': UNet.UNet}

m = method[key](n_classes, input_height=input_height, input_width=input_width)
m.compile(
    loss='categorical_crossentropy',
    optimizer="adadelta",
    metrics=['acc'])

G = LoadBatches.imageSegmentationGenerator(train_images_path,
                                           train_segs_path, train_batch_size, n_classes=n_classes, input_height=input_height, input_width=input_width)

G_test = LoadBatches.imageSegmentationGenerator(val_images_path,
                                                val_segs_path, val_batch_size, n_classes=n_classes, input_height=input_height, input_width=input_width)

checkpoint = ModelCheckpoint(
    filepath="output/%s_model.h5" % key,
    monitor='acc',
    mode='auto',
    save_best_only=True)
tensorboard = TensorBoard(log_dir='output/log_%s_model' % key)
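# Note (assumption): 367 below appears to be the number of images in images_prepped_train;
# adjust steps_per_epoch if your dataset differs.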

m.fit_generator(generator=G,
                steps_per_epoch=math.ceil(367. / train_batch_size),
                epochs=epochs, callbacks=[checkpoint, tensorboard],
                verbose=2,
                validation_data=G_test,
                validation_steps=8,
                shuffle=True)


================================================
FILE: visualizeDataset.py
================================================
import glob
import numpy as np
import cv2
import random
from skimage import color, exposure


def imageSegmentationGenerator(images_path, segs_path, n_classes):

    assert images_path[-1] == '/'
    assert segs_path[-1] == '/'

    images = sorted(glob.glob(images_path + "*.jpg") +
                    glob.glob(images_path + "*.png") +
                    glob.glob(images_path + "*.jpeg"))
    segmentations = sorted(glob.glob(
        segs_path + "*.jpg") + glob.glob(segs_path + "*.png") + glob.glob(segs_path + "*.jpeg"))

    colors = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
              for _ in range(n_classes)]

    assert len(images) == len(segmentations)

    for im_fn, seg_fn in zip(images, segmentations):

        img = cv2.imread(im_fn)
        seg = cv2.imread(seg_fn)
        print(np.unique(seg))

        seg_img = np.zeros_like(seg)

        for c in range(n_classes):
            seg_img[:, :, 0] += ((seg[:, :, 0] == c) *
                                 (colors[c][0])).astype('uint8')
            seg_img[:, :, 1] += ((seg[:, :, 0] == c) *
                                 (colors[c][1])).astype('uint8')
            seg_img[:, :, 2] += ((seg[:, :, 0] == c) *
                                 (colors[c][2])).astype('uint8')
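        # Histogram-equalize the V (brightness) channel in HSV space so the displayed
        # frame is easier to inspect.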

        eqaimg = color.rgb2hsv(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        eqaimg[:, :, 2] = exposure.equalize_hist(eqaimg[:, :, 2])
        eqaimg = color.hsv2rgb(eqaimg)

        cv2.imshow("img", img)
        cv2.imshow("seg_img", seg_img)
        cv2.imshow("equalize_hist_img",
                   cv2.cvtColor((eqaimg * 255.).astype(np.uint8), cv2.COLOR_RGB2BGR))
        cv2.waitKey()


images = "data/dataset1/images_prepped_train/"
annotations = "data/dataset1/annotations_prepped_train/"
n_classes = 11

imageSegmentationGenerator(images, annotations, n_classes)
================================================
SYMBOL INDEX (29 symbols across 9 files)
================================================

FILE: LoadBatches.py
  function getImageArr (line 9) | def getImageArr(im):
  function getSegmentationArr (line 20) | def getSegmentationArr(seg, nClasses, input_height, input_width):
  function imageSegmentationGenerator (line 31) | def imageSegmentationGenerator(images_path, segs_path, batch_size,

FILE: Models/FCN32.py
  function FCN32 (line 7) | def FCN32(nClasses, input_height, input_width):

FILE: Models/FCN8.py
  function FCN8_helper (line 6) | def FCN8_helper(nClasses, input_height, input_width):
  function FCN8 (line 51) | def FCN8(nClasses, input_height, input_width):

FILE: Models/SegNet.py
  function SegNet (line 12) | def SegNet(nClasses, input_height, input_width):

FILE: Models/UNet.py
  function UNet (line 13) | def UNet(nClasses, input_height, input_width):

FILE: Models/utils.py
  class MaxPoolingWithArgmax2D (line 11) | class MaxPoolingWithArgmax2D(Layer):
    method __init__ (line 13) | def __init__(
    method call (line 24) | def call(self, inputs, **kwargs):
    method compute_output_shape (line 44) | def compute_output_shape(self, input_shape):
    method compute_mask (line 53) | def compute_mask(self, inputs, mask=None):
  class MaxUnpooling2D (line 57) | class MaxUnpooling2D(Layer):
    method __init__ (line 58) | def __init__(self, up_size=(2, 2), **kwargs):
    method call (line 62) | def call(self, inputs, output_shape=None):
    method compute_output_shape (line 99) | def compute_output_shape(self, input_shape):

FILE: Models/utils_cpu.py
  class MaxPoolingWithArgmax2D (line 11) | class MaxPoolingWithArgmax2D(Layer):
    method __init__ (line 13) | def __init__(
    method call (line 24) | def call(self, inputs, **kwargs):
    method compute_output_shape (line 44) | def compute_output_shape(self, input_shape):
    method compute_mask (line 53) | def compute_mask(self, inputs, mask=None):
  class MaxUnpooling2D (line 57) | class MaxUnpooling2D(Layer):
    method __init__ (line 58) | def __init__(self, up_size=(2, 2), **kwargs):
    method call (line 62) | def call(self, inputs, output_shape=None):
    method compute_output_shape (line 102) | def compute_output_shape(self, input_shape):

FILE: predict.py
  function label2color (line 34) | def label2color(colors, n_classes, seg):
  function getcenteroffset (line 47) | def getcenteroffset(shape, input_height, input_width):

FILE: visualizeDataset.py
  function imageSegmentationGenerator (line 8) | def imageSegmentationGenerator(images_path, segs_path, n_classes):
