Repository: yaojieliu/ECCV2018-FaceDeSpoofing
Branch: master
Commit: a405004e7767
Files: 12
Total size: 56.7 KB
Directory structure:
gitextract_qtmt8y7o/
├── Architecture.py
├── LICENSE
├── README.md
├── Train.py
├── data_train.py
├── facepad-test.py
├── lib/
│ ├── saved_model.pb
│ └── variables/
│ ├── variables.data-00000-of-00001
│ └── variables.index
└── score/
├── 1_1_36_1.score
├── 1_1_36_3.score
└── 1_1_36_5.score
================================================
FILE CONTENTS
================================================
================================================
FILE: Architecture.py
================================================
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import numpy as np
import tensorflow.contrib.layers as layers
import data_train
FLAGS = tf.app.flags.FLAGS
arg_scope = tf.contrib.framework.arg_scope

# Basic model parameters, shared with Train.py through tf.app.flags.
tf.app.flags.DEFINE_integer('batch_size', 6,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/data/train_demo',
                           """Path to the Anti-Spoofing data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            """Train the model using fp16.""")

# Dataset constants re-exported from the input reader module.
IMAGE_SIZE = data_train.IMAGE_SIZE
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = data_train.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = data_train.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 15.0       # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.8  # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.00003   # Initial learning rate for Adam.
R_FOR_LSE = 10    # NOTE(review): not referenced anywhere in this file.
TOWER_NAME = 'tower'  # Prefix stripped from op names in summaries.
def _activation_summary(x):
    """Print *x*'s static shape and attach a histogram summary of its activations."""
    # Debug aid while the graph is being constructed.
    print(x.shape)
    # Drop any multi-tower prefix (e.g. "tower_0/") so summaries from all
    # towers share a single name.
    clean_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.summary.histogram(clean_name + '/activations', x)
def _variable_on_cpu(name, shape, initializer):
    """Create (or fetch via reuse) a variable stored in host memory."""
    if FLAGS.use_fp16:
        dtype = tf.float16
    else:
        dtype = tf.float32
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Create a truncated-normal CPU variable, optionally with L2 weight decay.

    When *wd* is not None, an L2 penalty wd * l2_loss(var) is appended to the
    'losses' collection so the total loss picks it up.
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    initializer = tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)
    var = _variable_on_cpu(name, shape, initializer)
    if wd is not None:
        penalty = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', penalty)
    return var
def distorted_inputsB(a):
    """Construct distorted inputs for training.

    Args:
      a: pipeline selector; 1 uses data_train.distorted_inputs, any other
         value uses data_train.distorted_inputsA.

    Returns:
      (images, dmaps, labels, sizes, slabels) tensors from the input reader.

    Raises:
      ValueError: if FLAGS.data_dir is not set.
    """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = FLAGS.data_dir
    # Bug fix: the original called an undefined module `cifar10_input`;
    # this file imports the input reader as `data_train`.
    if a == 1:
        images, dmaps, labels, sizes, slabels = data_train.distorted_inputs(
            data_dir=data_dir, batch_size=FLAGS.batch_size)
    else:
        images, dmaps, labels, sizes, slabels = data_train.distorted_inputsA(
            data_dir=data_dir, batch_size=FLAGS.batch_size)
    if FLAGS.use_fp16:
        images = tf.cast(images, tf.float16)
        # Bug fix: was `tf.case(images, tf.float16)` — tf.case is the
        # control-flow op and the wrong tensor was being converted; the
        # depth maps were meant to be cast like the images.
        dmaps = tf.cast(dmaps, tf.float16)
    return images, dmaps, labels, sizes, slabels
def inputs(testset):
    """Construct (undistorted) inputs for evaluation.

    Args:
      testset: forwarded to the input reader to select the evaluation split.

    Returns:
      (images, dmaps, labels, sizes, slabels) tensors from the input reader.

    Raises:
      ValueError: if FLAGS.data_dir is not set.
    """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = FLAGS.data_dir
    # Bug fix: the original called an undefined module `cifar10_input`;
    # this file imports the input reader as `data_train`.
    images, dmaps, labels, sizes, slabels = data_train.inputs(testset=testset,
                                                              data_dir=data_dir,
                                                              batch_size=FLAGS.batch_size)
    if FLAGS.use_fp16:
        images = tf.cast(images, tf.float16)
        # Bug fix: was `tf.case(images, tf.float16)` — wrong op and wrong
        # tensor; cast the depth maps to match the images.
        dmaps = tf.cast(dmaps, tf.float16)
    return images, dmaps, labels, sizes, slabels
def inference(images, size, labels, training_nn, training_class, _reuse):
    """Build the complete face de-spoofing graph.

    Three sub-networks are constructed:
      * SecondAMIN — a depth-map CNN applied to the input images, and later
        re-applied (frozen, variables reused) to the estimated live images.
      * FirstAMIN  — the DS Net estimating the spoof noise (conv17), a 0/1
        spoof map (conv11_fir), and from these the live-image estimate.
      * ThirdAMIN  — a GAN discriminator scoring the live estimate (sc) and
        the raw input (sc2) with shared weights.

    Args:
      images: input batch; summaries slice channels [3:6] as RGB, so a
        6-channel NHWC tensor at 256x256 is assumed — TODO confirm against
        data_train.
      size: per-example live/spoof indicator used to mask the label maps.
      labels: ground-truth depth maps (caller resizes them to 32x32).
      training_nn: Python bool; drives batch norm, dropout and trainability.
      training_class: not used inside this function as far as visible.
      _reuse: variable-reuse flag forwarded to every trainable layer.

    Returns:
      A 14-tuple (Live_est, conv17, conv11, GT2, conv17, images,
      thirdPart_fft_1, LabelsWholeImage, conv11_new, conv11_new,
      LabelsWholeImage2, sc, sc2, conv11_fir) consumed by Train.py and the
      loss functions below.
    """
    # Shared batch-norm configuration for the trainable conv stacks.
    batch_norm_decay = 0.9
    batch_norm_epsilon = 1e-5
    batch_norm_scale = True
    batch_norm_params = {
        'is_training': training_nn,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': None,  # apply BN statistic updates in place
    }
    # ---------- SecondAMIN: depth CNN on the raw input ----------
    with arg_scope([layers.conv2d],
                   kernel_size=3,
                   weights_initializer=tf.random_normal_initializer(stddev=0.02),
                   biases_initializer=tf.constant_initializer(0.0),
                   activation_fn=tf.nn.elu,
                   normalizer_fn=layers.batch_norm,
                   normalizer_params=batch_norm_params,
                   trainable=training_nn,
                   reuse=_reuse,
                   padding='SAME',
                   stride=1):
        conv0 = layers.conv2d(images, num_outputs=64, scope='SecondAMIN/conv0')
        with tf.name_scope('convBlock-1') as scope:
            conv1 = layers.conv2d(conv0, num_outputs=128, scope='SecondAMIN/conv1')
            bconv1 = layers.conv2d(conv1, num_outputs=196, scope='SecondAMIN/bconv1')
            conv2 = layers.conv2d(bconv1, num_outputs=128, scope='SecondAMIN/conv2')
            pool1 = layers.max_pool2d(conv2, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool1')
            _activation_summary(conv1)
            _activation_summary(bconv1)
            _activation_summary(conv2)
        with tf.name_scope('convBlock-2') as scope:
            conv3 = layers.conv2d(pool1, num_outputs=128, scope='SecondAMIN/conv3')
            bconv2 = layers.conv2d(conv3, num_outputs=196, scope='SecondAMIN/bconv2')
            conv4 = layers.conv2d(bconv2, num_outputs=128, scope='SecondAMIN/conv4')
            pool2 = layers.max_pool2d(conv4, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool2')
            _activation_summary(conv3)
            _activation_summary(bconv2)
            _activation_summary(conv4)
        with tf.name_scope('convBlock-3') as scope:
            conv5 = layers.conv2d(pool2, num_outputs=128, scope='SecondAMIN/conv5')
            bconv3 = layers.conv2d(conv5, num_outputs=196, scope='SecondAMIN/bconv3')
            conv6 = layers.conv2d(bconv3, num_outputs=128, scope='SecondAMIN/conv6')
            pool3 = layers.avg_pool2d(conv6, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool3')
            _activation_summary(conv5)
            _activation_summary(bconv3)
            _activation_summary(conv6)
        # Fuse the three pooling stages at a common 32x32 resolution.
        map1 = tf.image.resize_images(pool1, [32, 32])
        map2 = tf.image.resize_images(pool2, [32, 32])
        map3 = tf.image.resize_images(pool3, [32, 32])
        summap = tf.concat([map1, map2, map3], 3)
        with tf.name_scope('Depth-Map-Block') as scope:
            conv7 = layers.conv2d(summap, num_outputs=128, scope='SecondAMIN/conv7')
            dp1 = tf.layers.dropout(conv7, rate=0.2, training=training_nn, name='SecondAMIN/dropout1')
            conv8 = layers.conv2d(dp1, num_outputs=64, scope='SecondAMIN/conv8')
            _activation_summary(conv7)
            _activation_summary(conv8)
    # Linear (no activation / no BN) head producing the 1-channel depth map.
    with arg_scope([layers.conv2d],
                   kernel_size=3,
                   weights_initializer=tf.random_normal_initializer(stddev=0.02),
                   biases_initializer=tf.constant_initializer(0.0),
                   activation_fn=None,
                   normalizer_fn=None,
                   padding='SAME',
                   trainable=training_nn,
                   reuse=_reuse,
                   stride=1):
        conv11 = layers.conv2d(conv8, num_outputs=1, scope='SecondAMIN/conv11')
        _activation_summary(conv11)
        tf.summary.image('depthMap_Second', conv11, max_outputs=FLAGS.batch_size)
    # ---------- FirstAMIN: DS-Net encoder ----------
    with arg_scope([layers.conv2d],
                   kernel_size=3,
                   weights_initializer=tf.random_normal_initializer(stddev=0.02),
                   biases_initializer=tf.constant_initializer(0.0),
                   activation_fn=tf.nn.elu,
                   normalizer_fn=layers.batch_norm,
                   normalizer_params=batch_norm_params,
                   trainable=training_nn,
                   reuse=_reuse,
                   padding='SAME',
                   stride=1):
        conv0_fir = layers.conv2d(images, num_outputs=24, scope='FirstAMIN/conv0')
        pool1_fir = layers.max_pool2d(conv0_fir, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='FirstAMIN/pool1')
        with tf.name_scope('convBlock-1_fir') as scope:
            conv1_fir = layers.conv2d(pool1_fir, num_outputs=20, scope='FirstAMIN/conv1')
            bconv1_fir = layers.conv2d(conv1_fir, num_outputs=25, scope='FirstAMIN/bconv1')
            conv2_fir = layers.conv2d(bconv1_fir, num_outputs=20, scope='FirstAMIN/conv2')
        with tf.name_scope('convBlock-2_fir') as scope:
            pool2_fir = layers.max_pool2d(conv2_fir, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='FirstAMIN/pool2')
            conv3_fir = layers.conv2d(pool2_fir, num_outputs=20, scope='FirstAMIN/conv3')
            bconv2_fir = layers.conv2d(conv3_fir, num_outputs=25, scope='FirstAMIN/bconv2')
            conv4_fir = layers.conv2d(bconv2_fir, num_outputs=20, scope='FirstAMIN/conv4')
        with tf.name_scope('convBlock-3_fir') as scope:
            pool3_fir = layers.avg_pool2d(conv4_fir, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='FirstAMIN/pool3')
            conv5_fir = layers.conv2d(pool3_fir, num_outputs=20, scope='FirstAMIN/conv5')
            bconv3_fir = layers.conv2d(conv5_fir, num_outputs=25, scope='FirstAMIN/bconv3')
            conv6_fir = layers.conv2d(bconv3_fir, num_outputs=20, scope='FirstAMIN/conv6')
        # Multi-scale fusion; conv6_fir is taken as-is (assumed 32x32 already
        # after three stride-2 pools — TODO confirm input resolution).
        map1_fir = tf.image.resize_images(conv2_fir, [32, 32])
        map2_fir = tf.image.resize_images(conv4_fir, [32, 32])
        map3_fir = conv6_fir
        summap_fir = tf.concat([map1_fir, map2_fir, map3_fir], 3)
        with tf.name_scope('Depth-Map-Block_fir') as scope:
            conv7_fir = layers.conv2d(summap_fir, num_outputs=28, scope='FirstAMIN/conv7')
            # rate=0 disables this dropout; kept for graph compatibility.
            dp1_fir = tf.layers.dropout(conv7_fir, rate=0, training=training_nn, name='FirstAMIN/dropout2')
            conv8_fir = layers.conv2d(dp1_fir, num_outputs=16, scope='FirstAMIN/conv8')
    # Linear, bias-free head producing the 1-channel 0/1 spoof map.
    with arg_scope([layers.conv2d],
                   kernel_size=3,
                   weights_initializer=tf.random_normal_initializer(stddev=0.02),
                   biases_initializer=None,  # no bias on this head
                   activation_fn=None,
                   normalizer_fn=None,
                   padding='SAME',
                   reuse=_reuse,
                   stride=1):
        conv11_fir = layers.conv2d(conv8_fir, num_outputs=1, scope='FirstAMIN/conv11')
        tf.summary.image('ZeroOneMap', tf.cast(256 * conv11_fir, tf.uint8), max_outputs=FLAGS.batch_size)
    # ---------- FirstAMIN decoder: upsample fused features back to 256x256 ----------
    with arg_scope([layers.conv2d],
                   kernel_size=3,
                   weights_initializer=tf.random_normal_initializer(stddev=0.02),
                   biases_initializer=tf.constant_initializer(0.0),
                   activation_fn=tf.nn.elu,
                   normalizer_fn=layers.batch_norm,
                   normalizer_params=batch_norm_params,
                   trainable=training_nn,
                   padding='SAME',
                   reuse=_reuse,
                   stride=1):
        with tf.name_scope('Score-Map-Block09') as scope:
            summap_fir = tf.image.resize_images(summap_fir, [256, 256])
            conv9_fir = layers.conv2d(summap_fir, num_outputs=28, scope='FirstAMIN/conv9')
            conv10_fir = layers.conv2d(conv9_fir, num_outputs=24, scope='FirstAMIN/conv10')
            conv12_fir = layers.conv2d(conv10_fir, num_outputs=20, scope='FirstAMIN/conv12')
            conv13_fir = layers.conv2d(conv12_fir, num_outputs=20, scope='FirstAMIN/conv13')
            conv14_fir = layers.conv2d(conv13_fir, num_outputs=20, scope='FirstAMIN/conv14')
            conv15_fir = layers.conv2d(conv14_fir, num_outputs=16, scope='FirstAMIN/conv15')
            conv16_fir = layers.conv2d(conv15_fir, num_outputs=16, scope='FirstAMIN/conv16')
    # Small-stddev linear head producing the 6-channel spoof-noise estimate.
    with arg_scope([layers.conv2d],
                   kernel_size=3,
                   weights_initializer=tf.random_normal_initializer(stddev=0.002),
                   biases_initializer=None,  # tf.constant_initializer(0.0),
                   activation_fn=None,
                   normalizer_fn=None,
                   padding='SAME',
                   reuse=_reuse,
                   stride=1):
        conv17 = layers.conv2d(conv16_fir, num_outputs=6, scope='FirstAMIN/conv17')
        # Log-magnitude 2-D FFT of the noise, center-cropped (drops a 32-px
        # border); consumed by the repetitive-pattern losses.
        thirdPart_comp_1 = tf.complex(conv17, tf.zeros_like(conv17))
        thirdPart_comp_1 = tf.transpose(thirdPart_comp_1, perm=[0, 3, 1, 2])
        thirdPart_fft_1 = tf.abs(tf.fft2d(thirdPart_comp_1, name='summap_fft_real_1'))
        thirdPart_fft_1 = tf.transpose(thirdPart_fft_1, perm=[0, 2, 3, 1])
        thirdPart_fft_1 = tf.log1p(thirdPart_fft_1[:, 32:256 - 32, 32:256 - 32, :])
        # Live-image estimate: subtract the (scaled by 1/45) noise, then
        # zero out negative pixels.
        Live_est1 = images - conv17 / 45
        Live_est_mask = tf.cast(tf.greater(Live_est1, 0), tf.float32)
        Live_est = Live_est1 * Live_est_mask
    #################################################################################################################################
    # ---------- ThirdAMIN: GAN discriminator (two passes, shared weights) ----------
    with arg_scope([layers.conv2d],
                   kernel_size=3,
                   weights_initializer=tf.random_normal_initializer(stddev=0.02),
                   biases_initializer=tf.constant_initializer(0.0),
                   activation_fn=tf.nn.elu,
                   normalizer_fn=layers.batch_norm,
                   normalizer_params=batch_norm_params,
                   trainable=training_nn,
                   padding='SAME',
                   reuse=_reuse,
                   stride=1):
        with tf.name_scope('Score-Map-Block1_dis') as scope:
            # Pass 1: score the estimated live image -> 2-class logits `sc`.
            conv9_dis = layers.conv2d(Live_est, num_outputs=24, scope='ThirdAMIN/conv9')
            conv10_dis = layers.conv2d(conv9_dis, num_outputs=20, scope='ThirdAMIN/conv10')
            pool1_dis = layers.max_pool2d(conv10_dis, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool1')
            conv12_dis = layers.conv2d(pool1_dis, num_outputs=20, scope='ThirdAMIN/conv12')
            conv13_dis = layers.conv2d(conv12_dis, num_outputs=16, scope='ThirdAMIN/conv13')
            pool2_dis = layers.max_pool2d(conv13_dis, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool2')
            conv14_dis = layers.conv2d(pool2_dis, num_outputs=12, scope='ThirdAMIN/conv14')
            conv15_dis = layers.conv2d(conv14_dis, num_outputs=6, scope='ThirdAMIN/conv15')
            pool3_dis = layers.max_pool2d(conv15_dis, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool3')
            conv16_dis = layers.conv2d(pool3_dis, num_outputs=1, scope='ThirdAMIN/conv16')
            conv20_dis = tf.reshape(conv16_dis, [6, 32 * 32])  # batch size 6 hard-coded
            sc333_dis = layers.fully_connected(conv20_dis, num_outputs=100, reuse=_reuse, scope='ThirdAMIN/bconv15_sc333_dis')
            dp1_dis = tf.layers.dropout(sc333_dis, rate=0.2, training=training_nn, name='dropout3')
            sc = layers.fully_connected(dp1_dis, num_outputs=2, reuse=_reuse,
                                        weights_initializer=tf.random_normal_initializer(stddev=0.02),
                                        biases_initializer=None,  # tf.constant_initializer(0.0),
                                        activation_fn=None,
                                        normalizer_fn=None, scope='ThirdAMIN/bconv10_sc')
            # Pass 2: identical stack (reuse=True) on the raw input -> `sc2`.
            conv9_dis2 = layers.conv2d(images, num_outputs=24, reuse=True, scope='ThirdAMIN/conv9')
            conv10_dis2 = layers.conv2d(conv9_dis2, num_outputs=20, reuse=True, scope='ThirdAMIN/conv10')
            pool1_dis2 = layers.max_pool2d(conv10_dis2, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool1')
            conv12_dis2 = layers.conv2d(pool1_dis2, num_outputs=20, reuse=True, scope='ThirdAMIN/conv12')
            conv13_dis2 = layers.conv2d(conv12_dis2, num_outputs=16, reuse=True, scope='ThirdAMIN/conv13')
            pool2_dis2 = layers.max_pool2d(conv13_dis2, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool2')
            conv14_dis2 = layers.conv2d(pool2_dis2, num_outputs=12, reuse=True, scope='ThirdAMIN/conv14')
            conv15_dis2 = layers.conv2d(conv14_dis2, num_outputs=6, reuse=True, scope='ThirdAMIN/conv15')
            pool3_dis2 = layers.max_pool2d(conv15_dis2, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool3')
            conv16_dis2 = layers.conv2d(pool3_dis2, num_outputs=1, reuse=True, scope='ThirdAMIN/conv16')
            conv20_dis2 = tf.reshape(conv16_dis2, [6, 32 * 32])
            sc333_dis2 = layers.fully_connected(conv20_dis2, reuse=True, num_outputs=100, scope='ThirdAMIN/bconv15_sc333_dis')
            dp1_dis2 = tf.layers.dropout(sc333_dis2, rate=0.2, training=training_nn, name='dropout4')
            sc2 = layers.fully_connected(dp1_dis2, num_outputs=2, reuse=True,
                                         weights_initializer=tf.random_normal_initializer(stddev=0.02),
                                         biases_initializer=None,  # tf.constant_initializer(0.0),
                                         activation_fn=None,
                                         normalizer_fn=None, scope='ThirdAMIN/bconv10_sc')
    ##################################################################################################################################
    # ---------- SecondAMIN re-applied (frozen, reused) to the live estimate ----------
    # Batch norm runs in inference mode and no variable here is trainable.
    batch_norm_decay = 0.9
    batch_norm_epsilon = 1e-5
    batch_norm_scale = True
    batch_norm_params = {
        'is_training': False,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': None,
        'trainable': False,
        # 'reuse': True
    }
    with arg_scope([layers.conv2d],
                   kernel_size=3,
                   weights_initializer=tf.random_normal_initializer(stddev=0.02),
                   biases_initializer=tf.constant_initializer(0.0),
                   activation_fn=tf.nn.elu,
                   normalizer_fn=layers.batch_norm,
                   normalizer_params=batch_norm_params,
                   trainable=False,
                   padding='SAME',
                   reuse=True,
                   stride=1):
        #################################################################################################################################
        conv0_new = layers.conv2d(Live_est, num_outputs=64, scope='SecondAMIN/conv0')
        with tf.name_scope('convBlock-1_new') as scope:
            conv1_new = layers.conv2d(conv0_new, num_outputs=128, scope='SecondAMIN/conv1')
            bconv1_new = layers.conv2d(conv1_new, num_outputs=196, scope='SecondAMIN/bconv1')
            conv2_new = layers.conv2d(bconv1_new, num_outputs=128, scope='SecondAMIN/conv2')
            pool1_new = layers.max_pool2d(conv2_new, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool1')
        with tf.name_scope('convBlock-2_new') as scope:
            conv3_new = layers.conv2d(pool1_new, num_outputs=128, scope='SecondAMIN/conv3')
            bconv2_new = layers.conv2d(conv3_new, num_outputs=196, scope='SecondAMIN/bconv2')
            conv4_new = layers.conv2d(bconv2_new, num_outputs=128, scope='SecondAMIN/conv4')
            pool2_new = layers.max_pool2d(conv4_new, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool2')
        with tf.name_scope('convBlock-3_new') as scope:
            conv5_new = layers.conv2d(pool2_new, num_outputs=128, scope='SecondAMIN/conv5')
            bconv3_new = layers.conv2d(conv5_new, num_outputs=196, scope='SecondAMIN/bconv3')
            conv6_new = layers.conv2d(bconv3_new, num_outputs=128, scope='SecondAMIN/conv6')
            pool3_new = layers.avg_pool2d(conv6_new, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool3')
        map1_new = tf.image.resize_images(pool1_new, [32, 32])
        map2_new = tf.image.resize_images(pool2_new, [32, 32])
        map3_new = tf.image.resize_images(pool3_new, [32, 32])
        summap_new = tf.concat([map1_new, map2_new, map3_new], 3)
        # Depth Map Branch
        with tf.name_scope('Depth-Map-Block_new') as scope:
            conv7_new = layers.conv2d(summap_new, num_outputs=128, scope='SecondAMIN/conv7')
            dp1_new = tf.layers.dropout(conv7_new, rate=0.2, training=training_nn, name='SecondAMIN/dropout1')
            conv8_new = layers.conv2d(dp1_new, num_outputs=64, scope='SecondAMIN/conv8')
    with arg_scope([layers.conv2d],
                   kernel_size=3,
                   weights_initializer=tf.random_normal_initializer(stddev=0.02),
                   biases_initializer=tf.constant_initializer(0.0),
                   activation_fn=None,
                   normalizer_fn=None,
                   padding='SAME',
                   trainable=False,
                   reuse=True,
                   stride=1):
        # Depth map of the live estimate, from the frozen depth head.
        conv11_new = layers.conv2d(conv8_new, num_outputs=1, scope='SecondAMIN/conv11')
    # ---------- Label maps, summaries and outputs (batch size 6 hard-coded) ----------
    label_Amin1 = size
    LabelsWholeImage = tf.cast(np.ones([6, 32, 32, 1]), tf.float32)
    # All-ones map kept only for spoof examples (1 - label).
    LabelsWholeImage2 = LabelsWholeImage * tf.reshape(tf.cast(1 - label_Amin1, tf.float32), [6, 1, 1, 1])
    # Depth labels kept only for live examples.
    LabelsWholeImage = labels * tf.reshape(tf.cast(label_Amin1, tf.float32), [6, 1, 1, 1])
    # 3x3 map with a single center 1 (GT2); consumers visible via the return.
    Z_GT2 = np.zeros([6, 3, 3, 1])
    Z_GT2[:, 1, 1, :] = 1
    GT2 = tf.cast(Z_GT2, tf.float32)
    tf.summary.image('GT2', LabelsWholeImage[:, :, :, 0:1], max_outputs=FLAGS.batch_size)
    tf.summary.image('SC', tf.cast(256 * conv11[:, :, :, 0:1], tf.uint8), max_outputs=FLAGS.batch_size)
    tf.summary.image('Live_SC', tf.cast(256 * conv11_new[:, :, :, 0:1], tf.uint8), max_outputs=FLAGS.batch_size)
    tf.summary.image('Live', tf.cast(256 * Live_est[:, :, :, 3:6], tf.uint8), max_outputs=FLAGS.batch_size)
    tf.summary.image('inputImage', tf.cast(256 * images[:, :, :, 3:6], tf.uint8), max_outputs=FLAGS.batch_size)
    tf.summary.image('GT3_Artifact', LabelsWholeImage2[:, :, :, 0:1], max_outputs=FLAGS.batch_size)
    tf.summary.image('Artifact', conv17[:, :, :, 3:6], max_outputs=FLAGS.batch_size)
    return Live_est, conv17, conv11, GT2, conv17, images, thirdPart_fft_1, LabelsWholeImage, conv11_new, conv11_new, LabelsWholeImage2, sc, sc2, conv11_fir
#
def lossSecond(dmaps, smaps, labels, slabels, sc, GT2, fftmapA, A, B, bin_labels, bin_labels2, Nsc, Lsc, sc_fake, sc_real):
    """L1 loss between the predicted map `sc` and its target `bin_labels2`.

    The term is pushed onto the 'losses' collection; the sum of that
    collection is returned as the total loss.
    """
    with tf.name_scope('DR_Net_Training') as scope:
        # Mean absolute error, reduced over width then height.
        per_row = tf.reduce_mean(tf.abs(tf.subtract(sc, bin_labels2)),
                                 reduction_indices=2)
        per_image = tf.reduce_mean(per_row, reduction_indices=1)
        depth_loss = tf.reduce_mean(per_image, name='pixel_loss1') * 1
        tf.summary.scalar('Loss', depth_loss)
        tf.add_to_collection('losses', depth_loss)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
def lossThird(dmaps, smaps, labels, slabels, sc, GT2, fftmapA, A, B, bin_labels, bin_labels2, Nsc, Lsc, Allsc, sc_fake, sc_real):
    """Discriminator-side GAN loss for ThirdAMIN.

    Adds two softmax cross-entropy terms to the 'losses' collection:
      * sc_fake (logits on the live estimate) against class label 1.
      * sc_real masked by bin_labels against class label 0.
    Returns the sum of the 'losses' collection. Batch size 6 is hard-coded.
    """
    with tf.name_scope('GAN_Training') as scope:
        bin_labels3 = tf.ones([6, 1])
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.reshape(tf.cast(bin_labels3, tf.int32), [-1]), logits=tf.cast(sc_fake, tf.float32),
            name='cross_entropy_per_example')  # logits = (N,2), labels = (N,)
        loss22 = tf.reduce_mean(cross_entropy, name='classification_loss2') * 1
        tf.add_to_collection('losses', loss22)
        bin_labels3 = tf.zeros([6, 1])
        # Real-branch logits scaled element-wise by the live/spoof indicator.
        bin_labels_1 = tf.cast(sc_real, tf.float32) * tf.cast(bin_labels, tf.float32)
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.reshape(tf.cast(bin_labels3, tf.int32), [-1]), logits=bin_labels_1,
            name='cross_entropy_per_example2')  # logits = (N,2), labels = (N,)
        loss23 = tf.reduce_mean(cross_entropy, name='classification_loss3') * 1
        tf.summary.scalar('Loss', loss23 + loss22)
        tf.add_to_collection('losses', loss23)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
def lossFirst(dmaps, smaps, labels, slabels, sc, GT2, fftmapA, A, B, bin_labels, bin_labels2, Nsc, Lsc, Allsc, sc_fake, sc_real, conv11_fir):
    """Total loss for the DS Net (FirstAMIN).

    Accumulates seven weighted terms onto the 'losses' collection and
    returns their sum. At the call site in Train.py: dmaps is the live
    estimate, smaps the estimated noise, A the input images, B the noise
    FFT magnitude, Lsc the depth map of the live estimate, Allsc the 0/1
    target map and bin_labels the per-example live/spoof indicator —
    presumably 1 for live, 0 for spoof (TODO confirm against data_train).
    Batch size 6 is hard-coded throughout.
    """
    with tf.name_scope('Zero_One_Map_loss') as scope:
        # L1 between the 0/1 target map and the predicted map, weight 6000.
        mean_squared_loss = tf.reduce_mean(
            tf.reduce_mean(((tf.abs(tf.subtract(Allsc, conv11_fir)))),
                           reduction_indices=2),
            reduction_indices=1)
        loss823 = tf.reduce_mean(mean_squared_loss, name='pixel_loss823') * 6000
        tf.summary.scalar('Loss', loss823)
        tf.add_to_collection('losses', loss823)
    with tf.name_scope('Dr_Net_Backpropagate') as scope:
        # Depth map of the live estimate should match the depth labels.
        bin_labels23 = labels  # tf.zeros_like(bin_labels2)
        mean_squared_loss = tf.reduce_mean(
            tf.reduce_mean(tf.abs(tf.subtract(Lsc, bin_labels23)),
                           reduction_indices=2),
            reduction_indices=1)
        loss32 = tf.reduce_mean(mean_squared_loss, name='pixel_loss32') * 600
        tf.summary.scalar('Loss', loss32)
        tf.add_to_collection('losses', loss32)
    with tf.name_scope('GAN_Backpropagate') as scope:
        # Cross-entropy of the discriminator's fake-branch logits vs. class 0.
        bin_labelsE = tf.zeros([6, 1])
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.reshape(tf.cast(bin_labelsE, tf.int32), [-1]), logits=tf.cast(sc_fake, tf.float32),
            name='cross_entropy_per_example')
        loss22 = tf.reduce_mean(cross_entropy, name='classification_loss2') * 1 * 100
        tf.summary.scalar('Loss', loss22)
        tf.add_to_collection('losses', loss22)
    with tf.name_scope('Live_Repetitive_Pattern') as scope:
        # Peak of the noise-FFT magnitude, minimized for live examples.
        mean_squared_loss = tf.reduce_max(
            tf.reduce_max(B,
                          reduction_indices=2),
            reduction_indices=1)
        bin_labels_1 = tf.cast(bin_labels, tf.float32)
        bin_labels9 = tf.concat([bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1], 1)
        mean_squared_loss = mean_squared_loss * (bin_labels9)
        loss81 = tf.reduce_mean(mean_squared_loss, name='pixel_loss81') * 1
        tf.summary.scalar('Loss', loss81)
        tf.add_to_collection('losses', loss81)
    with tf.name_scope('Spoof_Repetitive_Pattern') as scope:
        # Same FFT peak, negated (i.e. maximized) for spoof examples.
        mean_squared_loss = tf.reduce_max(
            tf.reduce_max(B,
                          reduction_indices=2),
            reduction_indices=1)
        bin_labels_1 = tf.cast(1 - bin_labels, tf.float32)
        bin_labels9 = tf.concat([bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1], 1)
        mean_squared_loss2 = mean_squared_loss * (bin_labels9)
        mean_squared_loss = -mean_squared_loss2
        loss812 = tf.reduce_mean(mean_squared_loss, name='pixel_loss812') * 1 * 2
        tf.summary.scalar('Loss', loss812)
        tf.add_to_collection('losses', loss812)
    with tf.name_scope('Live_Images_Estimation') as scope:
        # L1 between A and dmaps (input image vs. live estimate at the call
        # site), applied to live examples only.
        mean_squared_loss = tf.reduce_mean(
            tf.reduce_mean(((tf.abs(tf.subtract(A, dmaps)))),
                           reduction_indices=2),
            reduction_indices=1)
        bin_labels_1 = tf.cast(bin_labels, tf.float32)
        bin_labels8 = tf.concat([bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1], 1)
        mean_squared_loss = mean_squared_loss * (bin_labels8)
        loss8 = tf.reduce_mean(mean_squared_loss, name='pixel_loss8') * 150 * 300
        tf.summary.scalar('Loss', loss8)
        tf.add_to_collection('losses', loss8)
    with tf.name_scope('Live_Noise') as scope:
        # Estimated noise should vanish for live examples.
        AllscZero = tf.cast(np.zeros([6, 256, 256, 6]), tf.float32)
        mean_squared_loss = tf.reduce_mean(
            tf.reduce_mean(((tf.abs(tf.subtract(AllscZero, smaps)))),
                           reduction_indices=2),
            reduction_indices=1)
        bin_labels_1 = tf.cast(bin_labels, tf.float32)
        bin_labels9 = tf.concat([bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1], 1)
        mean_squared_loss = mean_squared_loss * (bin_labels9)
        loss9 = tf.reduce_mean(mean_squared_loss, name='pixel_loss9') * 100 * 5
        tf.summary.scalar('Loss', loss9)
        tf.add_to_collection('losses', loss9)
    with tf.name_scope('Spoof_Noise') as scope:
        AllscOnes = tf.cast(tf.less(tf.abs(smaps), 0.04), tf.float32)  # NOTE(review): computed but unused
        # Mean |noise| for spoof examples is pushed toward 0.2.
        mean_squared_loss = tf.reduce_mean(
            tf.reduce_mean(((tf.abs(smaps))),
                           reduction_indices=2),
            reduction_indices=1)
        bin_labels_1 = tf.cast(1 - bin_labels, tf.float32)
        bin_labels9 = tf.concat([bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1, bin_labels_1], 1)
        mean_squared_loss2 = mean_squared_loss * (bin_labels9)
        mean_squared_loss = tf.abs(mean_squared_loss2 - 0.2)
        loss10 = tf.reduce_mean(mean_squared_loss, name='pixel_loss19') * 10 * 3
        tf.summary.scalar('Loss', loss10)
        tf.add_to_collection('losses', loss10)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
    """Attach an exponential moving average and scalar summaries to all losses.

    Returns the op that updates the averages; training ops should depend
    on it so the averages stay current.
    """
    ema = tf.train.ExponentialMovingAverage(0.9, name='avg')
    tracked = tf.get_collection('losses') + [total_loss]
    update_op = ema.apply(tracked)
    for loss in tracked:
        # The raw value keeps its original op name with a ' (raw)' suffix;
        # the smoothed value reuses the original name.
        tf.summary.scalar(loss.op.name + ' (raw)', loss)
        tf.summary.scalar(loss.op.name, ema.average(loss))
    return update_op
def train(total_loss, global_step, varName1):
    """Build a training op for the variables whose names match *varName1*.

    Applies an exponentially decayed learning rate with Adam, restricted to
    the variable scope selected by varName1 (e.g. 'FirstAMIN'), and tracks
    moving averages of both losses and those variables.

    Args:
      total_loss: scalar loss tensor to minimize.
      global_step: global step variable, incremented by the apply op.
      varName1: scope-name filter for the trainable-variables collection.

    Returns:
      A no-op that, when run, performs one optimization + averaging step.
    """
    # Variables that affect learning rate.
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
    decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                    global_step,
                                    decay_steps,
                                    LEARNING_RATE_DECAY_FACTOR,
                                    staircase=True)
    tf.summary.scalar('learning_rate', lr)
    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)
    # Compute gradients only after the loss averages have been updated.
    with tf.control_dependencies([loss_averages_op]):
        # opt = tf.train.GradientDescentOptimizer(lr)
        opt = tf.train.AdamOptimizer(lr)
        # Restrict optimization to the sub-network selected by varName1.
        first_train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, varName1)
        grads = opt.compute_gradients(total_loss, first_train_vars)
    # Apply gradients.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name, var)
    # Track the moving averages of the selected trainable variables.
    with tf.name_scope('TRAIN') as scope:
        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY, global_step)
        variables_averages_op = variable_averages.apply(first_train_vars)  # tf.trainable_variables())
        with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
            train_op = tf.no_op(name='train')
    return train_op
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2019 Yaojie Liu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
# Face De-Spoofing: Anti-Spoofing via Noise Modeling
Amin Jourabloo*, Yaojie Liu*, Xiaoming Liu

## Setup
Install TensorFlow (version >=1.1 and <2.0).
The source code files:
1. "Architecture.py": Contains the architectures and the definitions of the loss functions.
2. "data_train.py" : Contains the functions for reading the training data.
3. "Train.py": The main training file; it reads the training data, computes the loss functions, and backpropagates the error.
4. "facepad-test.py": It performs the testing on the test videos and generates the score for each frame.
## Training
To run the training code:
source ~/tensorflow/bin/activate
python /data/train_demo/code/Train.py
deactivate
## Testing
To run the testing code on a test video ("Test_video.avi"):
1. python facepad-test.py -input Test_video.avi -isVideo 1
2. It will generate a txt file in the Score folder which contains the score for each frame.
## Acknowledge
Please cite the paper:
@inproceedings{eccv18jourabloo,
title={Face De-Spoofing: Anti-Spoofing via Noise Modeling},
author={Amin Jourabloo* and Yaojie Liu* and Xiaoming Liu},
booktitle={In Proceeding of European Conference on Computer Vision (ECCV 2018)},
address={Munich, Germany},
year={2018}
}
@inproceedings{cvpr18liu,
title={Learning Deep Models for Face Anti-Spoofing: Binary or Auxiliary Supervision},
author={Yaojie Liu* and Amin Jourabloo* and Xiaoming Liu},
booktitle={In Proceeding of IEEE Computer Vision and Pattern Recognition (CVPR 2018)},
address={Salt Lake City, UT},
year={2018}
}
If you have any question, please contact: [Amin Jourabloo](amin.jourabloo@gmail.com)
================================================
FILE: Train.py
================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import Architecture
FLAGS = tf.app.flags.FLAGS
# Command-line flags for the training run. batch_size, data_dir and use_fp16
# are defined in Architecture.py and shared through tf.app.flags.
tf.app.flags.DEFINE_string('train_dir', '/research/cvl-liuyaoj1/tensorflow/model/ECCV2018/Oulu/P1', """Directory where to write event logs and checkpoint.""")
tf.app.flags.DEFINE_string('eval_data', 'train_eval',
                           """Either 'test' or 'train_eval'.""")
tf.app.flags.DEFINE_integer('max_steps', 2000000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")
tf.app.flags.DEFINE_integer('log_frequency', 10,
                            """How often to log results to the console.""")
tf.app.flags.DEFINE_string('gpu', '0',
                           """GPU to use [1].""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default() as g:
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
with tf.name_scope('Input') as scope:
images, labels, _, sizes, slabels = cifar10.distorted_inputsB(1)
labels = tf.image.resize_images(labels,[32, 32])
print(images)
print(labels)
# Build a Graph that computes the logits predictions from the
# inference model.
dmaps, smaps, sc, dmaps_1, smaps_1, A, B,bin_labels, Nsc, Lsc, Allsc,sc_fake, sc_real, conv11_fir = cifar10.inference(images, sizes, labels, training_nn = True, training_class = True , _reuse= False)
print(smaps)
print(sc)
Label_Amin=sizes
# Calculate loss.
loss1= cifar10.lossSecond(dmaps, smaps, labels, slabels, sc, dmaps_1, smaps_1, A, B ,Label_Amin,bin_labels, Nsc, Lsc,sc_fake, sc_real)
print(loss1)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_opS = cifar10.train(loss1, global_step,"SecondAMIN")
dmaps, smaps, sc, dmaps_1, smaps_1, A, B,bin_labels, Nsc, Lsc, Allsc,sc_fake, sc_real, conv11_fir = cifar10.inference(images, sizes, labels, training_nn = True, training_class = True , _reuse= True)
loss3= cifar10.lossThird(dmaps, smaps, labels, slabels, sc, dmaps_1, smaps_1, A, B ,Label_Amin,bin_labels, Nsc, Lsc, Allsc,sc_fake, sc_real)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_opT = cifar10.train(loss3, global_step,"ThirdAMIN")
####################################################################################################################################
dmaps, smaps, sc, dmaps_1, smaps_1, A, B,bin_labels, Nsc, Lsc, Allsc,sc_fake, sc_real, conv11_fir = cifar10.inference(images, sizes, labels, training_nn = True, training_class = True , _reuse= True)
loss2= cifar10.lossFirst(dmaps, smaps, labels, slabels, sc, dmaps_1, smaps_1, A, B ,Label_Amin,bin_labels, Nsc, Lsc, Allsc,sc_fake, sc_real,conv11_fir)
print(loss2)
train_opF = cifar10.train(loss2, global_step,"FirstAMIN")# FirstAMIN
loss= loss1+ loss2 + loss3
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, g)
class _LoggerHook(tf.train.SessionRunHook):
  """Logs loss and runtime.

  Session hook that keeps a local step counter and, every
  FLAGS.log_frequency steps, prints the fetched loss together with
  throughput (examples/sec and sec/batch).
  NOTE(review): `loss`, `FLAGS`, `time` and `datetime` are captured from
  the enclosing scope of train().
  """

  def begin(self):
    # Start at -1 so the first before_run() call makes this step 0.
    self._step = -1
    self._start_time = time.time()

  def before_run(self, run_context):
    self._step += 1
    return tf.train.SessionRunArgs(loss)  # Asks for loss value.

  def after_run(self, run_context, run_values):
    # Only report every FLAGS.log_frequency steps.
    if self._step % FLAGS.log_frequency == 0:
      current_time = time.time()
      # Wall-clock time elapsed since the previous logging step.
      duration = current_time - self._start_time
      self._start_time = current_time
      loss_value = run_values.results
      examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
      sec_per_batch = float(duration / FLAGS.log_frequency)
      format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch)')
      print (format_str % (datetime.now(), self._step, loss_value,
                           examples_per_sec, sec_per_batch))
i = 71000
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8, visible_device_list =FLAGS.gpu)
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement, gpu_options = gpu_options),save_checkpoint_secs=240
) as mon_sess:
while not mon_sess.should_stop():
if i % 100 == 1:
_, summary = mon_sess.run([train_opS, summary_op])
_, summary = mon_sess.run([train_opT, summary_op])
_, summary = mon_sess.run([train_opF, summary_op])
summary_writer.add_summary(summary, i)
else:
mon_sess.run(train_opS)
mon_sess.run(train_opT)
mon_sess.run(train_opF)
i += 1
def main(argv=None):  # pylint: disable=unused-argument
  """Entry point: ensure the checkpoint directory exists, then train.

  Args:
    argv: unused; present for tf.app.run() compatibility.
  """
  # BUG FIX: the original called MakeDirs only when the directory ALREADY
  # existed (a no-op) and did nothing when it was missing, so a fresh run
  # had no train_dir. Create it when absent instead.
  if not tf.gfile.Exists(FLAGS.train_dir):
    tf.gfile.MakeDirs(FLAGS.train_dir)
  # tf.gfile.DeleteRecursively(FLAGS.train_dir)
  train()


if __name__ == '__main__':
  tf.app.run()
================================================
FILE: data_train.py
================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import numpy.random
IMAGE_SIZE = 256
MAP_SIZE = 64
# Global constants describing the CIFAR-10 data set.
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 40000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 62957
def readFromFile(filename_queue): #,metaname_queue):
  """Read and decode one fixed-length record from the filename queue.

  Record layout (uint8 bytes, in order):
    256*256*3  -- CHW RGB face image
    64*64      -- depth map
    1          -- class label

  Args:
    filename_queue: queue of .dat shard names to read from.

  Returns:
    An object with fields: key, image (HWC float32 scaled by 1/256),
    dmap (64x64x1 float32 scaled by 1/256), label (length-1 int32),
    plus the raw geometry attributes.
  """
  class DataRecord(object):
    pass

  record = DataRecord()
  # Fixed per-sample geometry.
  record.height = 256
  record.width = 256
  record.depth = 3
  record.dmap_height = 64
  record.dmap_width = 64
  dmap_bytes = record.dmap_height * record.dmap_width
  image_bytes = record.height * record.width * record.depth
  record_bytes = image_bytes + dmap_bytes + 1

  # Pull one fixed-length record off the queue.
  reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
  record.key, raw_record = reader.read(filename_queue)

  # Reinterpret the record string as a flat uint8 vector.
  raw_bytes = tf.decode_raw(raw_record, tf.uint8)

  # Image bytes are stored channel-first; transpose to HWC and scale.
  chw_image = tf.reshape(
      tf.strided_slice(raw_bytes, [0], [image_bytes]),
      [record.depth, record.height, record.width])
  record.image = tf.cast(tf.transpose(chw_image, [1, 2, 0]), tf.float32) / 256

  # Depth map immediately follows the image bytes.
  raw_dmap = tf.reshape(
      tf.strided_slice(raw_bytes, [image_bytes], [image_bytes + dmap_bytes]),
      [1, record.dmap_height, record.dmap_width])
  record.dmap = tf.cast(tf.transpose(raw_dmap, [1, 2, 0]), tf.float32) / 256

  # Final byte of the record is the label.
  record.label = tf.cast(
      tf.strided_slice(raw_bytes, [image_bytes + dmap_bytes],
                       [image_bytes + dmap_bytes + 1]), tf.int32)
  return record
def _generate_image_and_label_batch(image, dmap, label, *args, **kwargs):
  """Assemble single examples into (optionally shuffled) training batches.

  Accepts either of the two call shapes used in this file:
    (image, dmap, label, min_queue_examples, batch_size, shuffle=...)
    (image, dmap, label, size, slabel, min_queue_examples, batch_size,
     shuffle=...)

  BUG FIX: the original signature only accepted the first shape, so the
  seven-positional-argument calls from distorted_inputsA()/inputs() raised
  TypeError. The extra size/slabel tensors are accepted but ignored here,
  matching the original behavior of returning `labels` placeholders in
  their output slots.

  Args:
    image: [H, W, 6] float32 tensor (HSV channels stacked with RGB).
    dmap: [64, 64, 1] float32 depth map.
    label: [1] int32 class label.
    *args: (min_queue_examples, batch_size) or
           (size, slabel, min_queue_examples, batch_size).
    **kwargs: shuffle=bool, default False (keyword form keeps Python 2
      compatibility, which this file's __future__ imports suggest).

  Returns:
    images, dmaps, labels, labels, labels — batched tensors; the last two
    slots are placeholders kept for caller compatibility.
  """
  if len(args) == 2:
    min_queue_examples, batch_size = args
  elif len(args) == 4:
    _size, _slabel, min_queue_examples, batch_size = args
  else:
    raise TypeError(
        'expected (min_queue_examples, batch_size) or '
        '(size, slabel, min_queue_examples, batch_size), got %d extra '
        'positional arguments' % len(args))
  shuffle = kwargs.pop('shuffle', False)
  if kwargs:
    raise TypeError('unexpected keyword arguments: %r' % sorted(kwargs))

  # 16 reader threads keep the input queue full.
  num_preprocess_threads = 16
  if shuffle:
    images, dmaps, labels = tf.train.shuffle_batch(
        [image, dmap, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        allow_smaller_final_batch=False,
        min_after_dequeue=min_queue_examples)
  else:
    images, dmaps, labels = tf.train.batch(
        [image, dmap, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        allow_smaller_final_batch=False,
        capacity=min_queue_examples + 3 * batch_size)
  # Split the 6-channel input back into its two 3-channel halves.
  # NOTE(review): the concat order in distorted_inputs() is [hsv, rgb], so
  # these variable names appear swapped; kept as-is (summary labels only).
  irgb, ihsv = tf.split(images, num_or_size_splits=2, axis=3)
  # Display the training images in the visualizer.
  tf.summary.image('input1', irgb)
  tf.summary.image('input2', ihsv)
  tf.summary.image('input3', dmaps)
  return images, dmaps, labels, labels, labels
def distorted_inputs(data_dir, batch_size):
  """Build the shuffled training input pipeline from the Oulu .dat shards.

  Args:
    data_dir: base data directory (see NOTE below — currently ignored).
    batch_size: number of examples per batch.

  Returns:
    The 5-tuple produced by _generate_image_and_label_batch().
  """
  # NOTE(review): the second argument to os.path.join is an absolute path,
  # which makes os.path.join discard data_dir entirely — TODO confirm the
  # hard-coded paths are intentional.
  filenames11 = [os.path.join(data_dir, '/research/cvlshare/Databases/Oulu/bin/1s/train_%d.dat' % i)
                 for i in xrange(1,400)]
  filenames = filenames11
  metanames11 = [os.path.join(data_dir, '/data/train_demo/bin1/train_meta_%d.dat' % i)
                 for i in xrange(1,200)]
  metanames12 = [os.path.join(data_dir, '/data/train_demo/bin2/train_meta_%d.dat' % i)
                 for i in xrange(1,200)]
  metanames = metanames11 + metanames12 #+ metanames13 + metanames2 #+ metanames21
  # Shuffle data/meta names together so the pairing stays aligned.
  # NOTE(review): zip truncates to the shorter list — 399 data files vs
  # 398 meta files here, so the last data file is silently dropped; the
  # meta names are not actually consumed after this point.
  names = list(zip(filenames, metanames))
  numpy.random.shuffle(names)
  filenames, metanames = zip(*names)
  num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
  # Fail fast if any shard is missing.
  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)
  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)
  #metaname_queue = tf.train.string_input_producer(metanames)
  # Read examples from files in the filename queue.
  read_input = readFromFile(filename_queue)#, metaname_queue)
  height = IMAGE_SIZE
  width = IMAGE_SIZE
  # data augmentation (random flip currently disabled)
  distorted_image = read_input.image
  #distorted_image = tf.image.random_flip_left_right(distorted_image)
  # Stack the HSV conversion on top of RGB -> 6-channel network input.
  hsv_image = tf.image.rgb_to_hsv(distorted_image)
  float_image = tf.concat([hsv_image,distorted_image],axis = 2)
  #
  float_image.set_shape([height, width, 6])
  read_input.dmap.set_shape([MAP_SIZE, MAP_SIZE, 1])
  read_input.label.set_shape([1])
  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.1
  min_queue_examples = int(num_examples_per_epoch * min_fraction_of_examples_in_queue)
  print ('Filling queue with %d CASIA AntiSpoofing images before starting to train. '
         'This will take a few minutes.' % min_queue_examples)
  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.dmap, read_input.label,
                                         min_queue_examples, batch_size,
                                         shuffle=True)
def distorted_inputsA(data_dir, batch_size):
  """Build a shuffled training pipeline over the bin4/mix .dat shards.

  NOTE(review): as written this function cannot run to completion — see
  the inline notes on the readFromFile() call and the final batch call.
  It appears to be stale code kept for reference.
  """
  # NOTE(review): absolute second argument makes os.path.join discard
  # data_dir entirely — TODO confirm intentional.
  filenames21 = [os.path.join(data_dir, '/data/train_demo/bin4/train_%d.dat' % i)
                 for i in xrange(1,20)]
  filenames = filenames21
  metanames21 = [os.path.join(data_dir, '/data/train_demo/mix/train_meta_%d.dat' % i)
                 for i in xrange(1,20)]
  metanames = metanames21
  # Shuffle data/meta file lists together so the pairs stay aligned.
  names = list(zip(filenames, metanames))
  numpy.random.shuffle(names)
  filenames, metanames = zip(*names)
  num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
  # Fail fast if any shard is missing.
  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)
  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)
  metaname_queue = tf.train.string_input_producer(metanames)
  # Read examples from files in the filename queue.
  # NOTE(review): readFromFile() takes a single queue argument, so this
  # two-argument call raises TypeError.
  read_input = readFromFile(filename_queue, metaname_queue)
  height = IMAGE_SIZE
  width = IMAGE_SIZE
  # data augmentation (random flip currently disabled)
  distorted_image = read_input.image
  #distorted_image = tf.image.random_flip_left_right(distorted_image)
  # Stack HSV on top of RGB -> 6-channel input.
  hsv_image = tf.image.rgb_to_hsv(distorted_image)
  float_image = tf.concat([hsv_image,distorted_image],axis = 2)
  #float_image = distorted_image
  # Set the shapes of tensors.
  float_image.set_shape([height, width, 6])
  read_input.dmap.set_shape([MAP_SIZE, MAP_SIZE, 1])
  read_input.label.set_shape([1])
  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.1
  min_queue_examples = int(num_examples_per_epoch * min_fraction_of_examples_in_queue)
  print ('Filling queue with %d CASIA AntiSpoofing images before starting to train. '
         'This will take a few minutes.' % min_queue_examples)
  # Generate a batch of images and labels by building up a queue of examples.
  # NOTE(review): read_input.size / read_input.slabel are never set by
  # readFromFile(), and the original _generate_image_and_label_batch()
  # signature only accepts six parameters — both would need fixing for
  # this path to run.
  return _generate_image_and_label_batch(float_image, read_input.dmap, read_input.label, read_input.size, read_input.slabel,
                                         min_queue_examples, batch_size,
                                         shuffle=True)
def inputs(testset, data_dir, batch_size):
  """Build the (non-shuffled) evaluation input pipeline.

  Args:
    testset: selects the shard set — 1: CASIA test, 2: CASIA train,
      3: REPLAY-ATTACK test, otherwise REPLAY-ATTACK test with train meta.
    data_dir: base data directory joined with the relative shard names.
    batch_size: number of examples per batch.

  NOTE(review): as written this function cannot run to completion — see
  the inline notes on readFromFile() and the attribute accesses below.
  """
  if testset == 1:
    filenames = [os.path.join(data_dir, 'CASIA-FASD/CASIA_test_%d.dat' % i)
                 for i in xrange(1,11)]
    metanames = [os.path.join(data_dir, 'CASIA-FASD/CASIA_test_meta_%d.dat' % i)
                 for i in xrange(1,11)]
  elif testset == 2:
    filenames1 = [os.path.join(data_dir, 'CASIA-FASD/CASIA_train_%d.dat' % i)
                  for i in xrange(1,11)]
    # NOTE(review): filenames2/metanames2 are built but never used.
    filenames2 = [os.path.join(data_dir, 'New_DataSet/BONUS6_train_%d.dat' % i)
                  for i in xrange(1,11)]
    filenames = filenames1
    metanames1 = [os.path.join(data_dir, 'CASIA-FASD/CASIA_train_meta_%d.dat' % i)
                  for i in xrange(1,11)]
    metanames2 = [os.path.join(data_dir, 'New_DataSet/BONUS6_train_meta_%d.dat' % i)
                  for i in xrange(1,11)]
    metanames = metanames1
  elif testset == 3:
    filenames = [os.path.join(data_dir, 'REPLAY-ATTACK/REPLAY-ATTACK/IDIAP128_test_%d.dat' % i)
                 for i in xrange(1,11)]
    metanames = [os.path.join(data_dir, 'REPLAY-ATTACK/REPLAY-ATTACK/IDIAP128_test_meta_%d.dat' % i)
                 for i in xrange(1,11)]
  else:
    filenames = [os.path.join(data_dir, 'REPLAY-ATTACK/REPLAY-ATTACK/IDIAP128_test_%d.dat' % i)
                 for i in xrange(1,11)]
    metanames = [os.path.join(data_dir, 'REPLAY-ATTACK/REPLAY-ATTACK/IDIAP128_train_meta_%d.dat' % i)
                 for i in xrange(1,11)]
  num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
  # Fail fast if any shard is missing.
  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)
  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)
  metaname_queue = tf.train.string_input_producer(metanames)
  # Read examples from files in the filename queue.
  # NOTE(review): readFromFile() takes a single queue argument, so this
  # two-argument call raises TypeError.
  read_input = readFromFile(filename_queue, metaname_queue)
  height = IMAGE_SIZE
  width = IMAGE_SIZE
  distorted_image = read_input.image
  # Stack HSV on top of RGB -> 6-channel input.
  hsv_image = tf.image.rgb_to_hsv(distorted_image)
  float_image = tf.concat([hsv_image,distorted_image],axis = 2)
  # float_image = distorted_image
  # Set the shapes of tensors.
  float_image.set_shape([height, width, 6])
  read_input.dmap.set_shape([MAP_SIZE, MAP_SIZE, 1])
  read_input.label.set_shape([1])
  # NOTE(review): readFromFile() never sets .size or .slabel, so these
  # attribute accesses raise AttributeError.
  read_input.size.set_shape([1])
  read_input.slabel.set_shape([1])
  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.05
  min_queue_examples = int(num_examples_per_epoch *
                           min_fraction_of_examples_in_queue)
  print ('Filling queue with %d CASIA AntiSpoofing images before starting to test. '
         'This will take a few minutes.' % min_queue_examples)
  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.dmap, read_input.label, read_input.size, read_input.slabel,
                                         min_queue_examples, batch_size,
                                         shuffle=False)
================================================
FILE: facepad-test.py
================================================
# Copyright 2018
#
# Yaojie Liu, Amin Jourabloo, Xiaoming Liu, Michigan State University
#
# All Rights Reserved.
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes notwithstanding any copyright annotation thereon.
# ==============================================================================
#
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' Tutorial code to use facePAD model
This tutorial can test the face anti-spoofing system on both video and image
files.
Examples:
python facepad-test.py -input ./examples/ex1.mov -isVideo 1
python facepad-test.py -input ./examples/ex1.jpg -isVideo 0
Model Input:
image: Cropped face in RGB. Ideal size should be larger than 256*256
Model Output:
score: liveness score, range [-1,1]. Higher score (--> 1) denotes spoofness.
Other usage:
Pretrained model can also deploy via Tensorflow Serving. The instruction of
Tensorflow Serving can be found at:
https://www.tensorflow.org/serving/serving_basic
The signature of the model is:
inputs = {'images': facepad_inputs}
outputs = {'depths': facepad_output_depth,
'scores': facepad_output_scores}
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import cv2
import sys
import os
import time
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# face detector && pad set-up
faced_dir = './haarcascade_frontalface_alt.xml'
export_dir = './lib'
faceCascade = cv2.CascadeClassifier(faced_dir)
# Basic model parameters.
IMAGE_SIZE = 256 #input image size
# name_scope
inputname = "input:0"
outputname = "Mean_2:0"#SecondAmin/
def facePAD_API(image):
    '''
    API Input:
        image: Cropped face in RGB at any size. Ideally, image is larger than
               256*256 and the dtype is uint8
    API Output:
        score: liveness score, float32, range [-1,1]. Higher score (--> 1)
               denotes spoofness.
    '''
    with tf.Session() as sess:
        # Restore the exported face-PAD SavedModel into this session.
        tf.saved_model.loader.load(sess,
                                   [tf.saved_model.tag_constants.SERVING],
                                   export_dir)
        # Look up the graph endpoints by their exported tensor names.
        graph = tf.get_default_graph()
        input_tensor = graph.get_tensor_by_name(inputname)
        output_tensor = graph.get_tensor_by_name(outputname)
        # Single forward pass on the supplied face crop.
        return sess.run(output_tensor, feed_dict={input_tensor: image})
def evaluate_image(imfile, scfile):
    """Detect the largest face in an image file, score it, write the score.

    Args:
        imfile: path to the input image.
        scfile: open, writable file object for the score output.

    Returns:
        The same scfile object, for caller convenience.
    """
    with tf.Session() as sess:
        # load the facepad model
        tf.saved_model.loader.load(sess,
                                   [tf.saved_model.tag_constants.SERVING],
                                   export_dir)
        image = tf.get_default_graph().get_tensor_by_name(inputname)
        scores = tf.get_default_graph().get_tensor_by_name(outputname)
        # get the image
        frame = cv2.imread(imfile)
        # detect faces in the frame. Detected face in faces with (x,y,w,h)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(256, 256)
        )
        # BUG FIX: the original sorted ascending by width and took faces[0],
        # which selects the SMALLEST face despite the "largest face" intent.
        # Sort descending so faces[0] is the largest detection. The bare
        # except is replaced by an explicit empty check.
        faces = sorted(faces, key=lambda f: f[2], reverse=True)
        if not faces:
            print("No face detected!")
            sys.exit()
        faces = [faces[0]]  # only process the largest face
        for (x, y, w, h) in faces:
            # crop a square face region from the frame
            l = max(w, h)
            face_raw = frame[y:y+l, x:x+l]
            # run the facepad
            sc = sess.run(scores, feed_dict={image: face_raw})
            # save the score
            scfile.write("%.3f\n" % sc)
    return scfile
def evaluate_video(vdfile, scfile):
    """Score every frame of a video using precomputed face bounding boxes.

    Expects a companion '<name>.txt' file next to the video with one
    comma-separated row per frame: frame_id, x1, y1, x2, y2.

    Args:
        vdfile: path to the input video.
        scfile: open, writable file object; one score line per frame.

    Returns:
        The same scfile object, for caller convenience.
    """
    # get the video
    video_capture = cv2.VideoCapture(vdfile)
    # BUG FIX: np.str was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin str is the documented drop-in replacement.
    bbox = np.loadtxt(vdfile[:-3] + 'txt', dtype=str, delimiter=',')
    bbox = bbox[:, 1:]  # drop the leading frame-id column
    # BUG FIX: unpacking exactly four components crashed on the common
    # three-part version strings (e.g. '4.8.1'); take only the major part.
    major_ver = cv2.__version__.split('.')[0]
    if int(major_ver) < 3:
        totalframes = video_capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
    else:
        totalframes = video_capture.get(cv2.CAP_PROP_FRAME_COUNT)
    with tf.Session() as sess:
        # load the facepad model
        tf.saved_model.loader.load(sess,
                                   [tf.saved_model.tag_constants.SERVING],
                                   export_dir)
        image = tf.get_default_graph().get_tensor_by_name(inputname)
        scores = tf.get_default_graph().get_tensor_by_name(outputname)
        fr = 0
        while fr < totalframes and fr < bbox.shape[0]:
            # get the frame from video
            _, frame = video_capture.read()
            # corners (x1, y1, x2, y2) -> top-left (x, y) and size (w, h)
            x = int(bbox[fr, 0])
            y = int(bbox[fr, 1])
            w = int(bbox[fr, 2]) - int(bbox[fr, 0])
            h = int(bbox[fr, 3]) - int(bbox[fr, 1])
            fr += 1
            # expand the square crop by 1.5x around the box, shifted up
            l = max(w, h)
            dl = l * 1.5 / 2
            x = int(x - dl)
            y = int(y - 1.1 * dl)
            l = int(l + dl + dl)
            # BUG FIX: the expansion can push x/y negative, and negative
            # slice starts wrap around in Python, producing a garbage crop;
            # clamp to the frame origin.
            x = max(x, 0)
            y = max(y, 0)
            # crop face from frame
            face_raw = frame[y:y+l, x:x+l]
            # run the facepad
            sc = sess.run(scores, feed_dict={image: face_raw})
            # save the score for video frames
            scfile.write("%.3f\n" % sc)
            print(sc)
    return scfile
def getopts(argv, opts):
    """Parse "-name value" pairs from an argv-style list into opts.

    Scans argv left to right; whenever a token starts with '-' it is
    treated as an option name and the following token as its value.
    A token starting with '-h' prints usage and exits.

    Args:
        argv: list of command-line tokens (typically sys.argv).
        opts: dict to populate; mutated in place.

    Returns:
        The same opts dict, for caller convenience.
    """
    while argv:  # While there are arguments left to parse...
        token = argv[0]
        if token.startswith('-'):  # Found a "-name value" pair.
            if len(token) > 1 and token[1] == 'h':
                print('-h : help')
                print('-input : STRING, the path to the testing video')
                print('-isVideo : True/False, indicate if it is a video. Default as False.')
                sys.exit()
            # BUG FIX: the original indexed argv[1] unconditionally, raising
            # IndexError when a flag was the last token; report it instead.
            if len(argv) < 2:
                print('Missing value for option ' + token)
                sys.exit()
            opts[token] = argv[1]  # Add key and value to the dictionary.
        argv = argv[1:]  # Reduce the argument list by one token, as before.
    return opts
if __name__ == '__main__':
    myargs = getopts(sys.argv, {})
    # BUG FIX: the module docstring documents -isVideo as defaulting to
    # False, but the original raised KeyError when it was omitted; default
    # to image mode ('0') instead.
    isVideo = myargs.get('-isVideo', '0')
    # -input has no sensible default; fail with a clear message rather
    # than a raw KeyError.
    if '-input' not in myargs:
        print('Missing required option -input (see -h for usage)')
        sys.exit()
    vdfile = myargs['-input']
    # Derive the score-file name from the input name: replace the media
    # extension with '.score' (handles 3- and 4-character extensions).
    if vdfile[-4] == '.':
        scfile = open('./score/' + vdfile[-12:-3] + 'score', 'w')
    else:
        scfile = open('./score/' + vdfile[-12:-4] + 'score', 'w')
    print(vdfile)
    print('Processing...')
    if isVideo == '1':
        scfile = evaluate_video(vdfile, scfile)
    else:
        scfile = evaluate_image(vdfile, scfile)
    scfile.close()
    print('Done!')
================================================
FILE: score/1_1_36_1.score
================================================
0.107
0.089
0.123
0.131
0.149
0.147
0.142
0.120
0.117
0.131
0.116
0.136
0.132
0.114
0.121
0.120
0.123
0.132
0.124
0.125
0.131
0.124
0.132
0.124
0.132
0.127
0.123
0.123
0.123
0.113
0.101
0.102
0.139
0.164
0.137
0.128
0.133
0.124
0.132
0.137
0.123
0.114
0.137
0.114
0.134
0.155
0.137
0.128
0.127
0.126
0.120
0.138
0.145
0.137
0.140
0.130
0.130
0.112
0.124
0.101
0.136
0.086
0.114
0.117
0.115
0.107
0.121
0.114
0.118
0.125
0.116
0.149
0.134
0.123
0.129
0.138
0.120
0.105
0.118
0.124
0.130
0.127
0.125
0.113
0.116
0.116
0.125
0.119
0.130
0.113
0.130
0.097
0.149
0.123
0.129
0.129
0.132
0.123
0.120
0.123
0.117
0.127
0.133
0.134
0.137
0.136
0.153
0.144
0.123
0.128
0.118
0.133
0.116
0.114
0.117
0.112
0.118
0.117
0.123
0.112
0.120
0.087
0.148
0.123
0.144
0.135
0.132
0.147
0.138
0.136
0.139
0.125
0.123
0.118
0.130
0.131
0.116
0.142
0.132
0.134
0.136
0.138
0.138
0.138
0.140
0.128
0.126
0.131
0.133
0.125
0.133
================================================
FILE: score/1_1_36_3.score
================================================
0.474
0.469
0.470
0.471
0.455
0.466
0.469
0.483
0.465
0.466
0.469
0.468
0.457
0.459
0.465
0.463
0.460
0.464
0.470
0.465
0.471
0.468
0.466
0.473
0.472
0.480
0.464
0.473
0.456
0.465
0.457
0.448
0.460
0.453
0.456
0.463
0.465
0.454
0.457
0.464
0.454
0.443
0.466
0.470
0.456
0.469
0.465
0.469
0.455
0.475
0.463
0.469
0.455
0.463
0.452
0.473
0.466
0.462
0.451
0.463
0.472
0.468
0.465
0.475
0.478
0.466
0.479
0.460
0.472
0.459
0.460
0.471
0.471
0.474
0.473
0.464
0.469
0.472
0.465
0.475
0.473
0.478
0.469
0.470
0.456
0.479
0.473
0.476
0.464
0.484
0.469
0.464
0.465
0.467
0.461
0.465
0.456
0.468
0.467
0.464
0.465
0.473
0.458
0.465
0.486
0.473
0.482
0.470
0.484
0.487
0.480
0.465
0.480
0.468
0.461
0.471
0.466
0.464
0.466
0.457
0.457
0.469
0.480
0.473
0.467
0.466
0.473
0.472
0.476
0.459
0.475
0.469
0.475
0.464
0.464
0.474
0.457
0.479
0.469
0.471
0.465
0.473
0.466
0.478
0.475
0.468
0.482
0.475
0.460
0.452
0.462
================================================
FILE: score/1_1_36_5.score
================================================
0.529
0.509
0.525
0.530
0.503
0.512
0.547
0.547
0.543
0.537
0.530
0.536
0.543
0.536
0.544
0.529
0.529
0.534
0.534
0.542
0.528
0.532
0.542
0.550
0.544
0.550
0.538
0.524
0.534
0.544
0.543
0.514
0.541
0.529
0.532
0.533
0.537
0.539
0.506
0.511
0.520
0.522
0.508
0.526
0.521
0.522
0.526
0.535
0.522
0.533
0.529
0.524
0.509
0.520
0.521
0.524
0.524
0.531
0.521
0.541
0.527
0.547
0.532
0.524
0.530
0.533
0.512
0.513
0.524
0.518
0.518
0.513
0.521
0.510
0.521
0.510
0.542
0.520
0.510
0.529
0.541
0.547
0.528
0.531
0.523
0.532
0.527
0.530
0.523
0.531
0.539
0.536
0.559
0.550
0.546
0.538
0.544
0.539
0.527
0.541
0.535
0.531
0.536
0.536
0.535
0.522
0.535
0.540
0.541
0.545
0.530
0.532
0.536
0.525
0.520
0.522
0.540
0.532
0.519
0.497
0.511
0.503
0.529
0.517
0.513
0.522
0.519
0.514
0.523
0.520
0.508
0.516
0.513
0.512
0.520
0.521
0.520
0.505
0.518
0.513
0.507
0.518
0.521
0.520
0.514
0.522
0.511
0.525
0.523
0.514
0.514
gitextract_qtmt8y7o/
├── Architecture.py
├── LICENSE
├── README.md
├── Train.py
├── data_train.py
├── facepad-test.py
├── lib/
│ ├── saved_model.pb
│ └── variables/
│ ├── variables.data-00000-of-00001
│ └── variables.index
└── score/
├── 1_1_36_1.score
├── 1_1_36_3.score
└── 1_1_36_5.score
SYMBOL INDEX (22 symbols across 4 files) FILE: Architecture.py function _activation_summary (line 47) | def _activation_summary(x): function _variable_on_cpu (line 57) | def _variable_on_cpu(name, shape, initializer): function _variable_with_weight_decay (line 66) | def _variable_with_weight_decay(name, shape, stddev, wd): function distorted_inputsB (line 84) | def distorted_inputsB(a): function inputs (line 101) | def inputs(testset): function inference (line 121) | def inference(images, size,labels, training_nn, training_class, _reuse): function lossSecond (line 512) | def lossSecond(dmaps, smaps, labels, slabels, sc,GT2, fftmapA, A, B,bin_... function lossThird (line 534) | def lossThird(dmaps, smaps, labels, slabels, sc,GT2, fftmapA, A, B,bin_l... function lossFirst (line 562) | def lossFirst(dmaps, smaps, labels, slabels, sc,GT2, fftmapA, A, B,bin_l... function _add_loss_summaries (line 711) | def _add_loss_summaries(total_loss): function train (line 728) | def train(total_loss, global_step, varName1): FILE: Train.py function train (line 27) | def train(): function main (line 131) | def main(argv=None): # pylint: disable=unused-argument FILE: data_train.py function readFromFile (line 22) | def readFromFile(filename_queue): #,metaname_queue): function _generate_image_and_label_batch (line 70) | def _generate_image_and_label_batch(image, dmap, label, min_queue_examples, function distorted_inputs (line 98) | def distorted_inputs(data_dir, batch_size): function distorted_inputsA (line 156) | def distorted_inputsA(data_dir, batch_size): function inputs (line 211) | def inputs(testset, data_dir, batch_size): FILE: facepad-test.py function facePAD_API (line 82) | def facePAD_API(image): function evaluate_image (line 104) | def evaluate_image(imfile,scfile): function evaluate_video (line 141) | def evaluate_video(vdfile,scfile): function getopts (line 194) | def getopts(argv,opts):
Condensed preview — 12 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (61K chars).
[
{
"path": "Architecture.py",
"chars": 29440,
"preview": "\n# pylint: disable=missing-docstring\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __futur"
},
{
"path": "LICENSE",
"chars": 1067,
"preview": "MIT License\n\nCopyright (c) 2019 Yaojie Liu\n\nPermission is hereby granted, free of charge, to any person obtaining a copy"
},
{
"path": "README.md",
"chars": 1801,
"preview": "# Face De-Spoofing: Anti-Spoofing via Noise Modeling\nAmin Jourabloo*, Yaojie Liu*, Xiaoming Liu\n\n
About this extraction
This page contains the full source code of the yaojieliu/ECCV2018-FaceDeSpoofing GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 12 files (56.7 KB), approximately 17.1k tokens, and a symbol index with 22 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.