[
  {
    "path": "Architecture.py",
    "content": "\n# pylint: disable=missing-docstring\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\nimport tarfile\n\nfrom six.moves import urllib\nimport tensorflow as tf\nimport numpy as np\nimport tensorflow.contrib.layers as layers\n\nimport data_train\n\nFLAGS = tf.app.flags.FLAGS\narg_scope = tf.contrib.framework.arg_scope\n\n# Basic model parameters.\ntf.app.flags.DEFINE_integer('batch_size', 6,\n                            \"\"\"Number of images to process in a batch.\"\"\")\ntf.app.flags.DEFINE_string('data_dir', '/data/train_demo',\n                           \"\"\"Path to the Anti-Spoofing data directory.\"\"\")\ntf.app.flags.DEFINE_boolean('use_fp16', False,\n                            \"\"\"Train the model using fp16.\"\"\")\n\n# Global constants describing the CIFAR-10 data set.\nIMAGE_SIZE = data_train.IMAGE_SIZE\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = data_train.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = data_train.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n\n\n# Constants describing the training process.\nMOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.\nNUM_EPOCHS_PER_DECAY = 15.0      # Epochs after which learning rate decays.\nLEARNING_RATE_DECAY_FACTOR = 0.8  # Learning rate decay factor.\nINITIAL_LEARNING_RATE = 0.00003      # Initial learning rate.\nR_FOR_LSE = 10\n\n\nTOWER_NAME = 'tower'\n\n\ndef _activation_summary(x):\n  \"\"\"\n    nothing\n  \"\"\"\n  # \n  # \n  print(x.shape)\n  tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)\n  tf.summary.histogram(tensor_name + '/activations', x)\n\ndef _variable_on_cpu(name, shape, initializer):\n  \"\"\"\n  \"\"\"\n  with tf.device('/cpu:0'):\n    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n    var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n  return var\n\n\ndef _variable_with_weight_decay(name, shape, 
stddev, wd):\n  \"\"\"\n    \n  \"\"\"\n  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n  var = _variable_on_cpu(\n      name,\n      shape,\n      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n  if wd is not None:\n    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n    tf.add_to_collection('losses', weight_decay)\n  return var\n\n\n\n\n\ndef distorted_inputsB(a):\n  if not FLAGS.data_dir:\n    raise ValueError('Please supply a data_dir')\n  data_dir = FLAGS.data_dir\n  if a==1:\t\n    images, dmaps, labels, sizes, slabels = cifar10_input.distorted_inputs(data_dir=data_dir, batch_size=FLAGS.batch_size)\n  else:\n    images, dmaps, labels, sizes, slabels = cifar10_input.distorted_inputsA(data_dir=data_dir, batch_size=FLAGS.batch_size)\n  if FLAGS.use_fp16:\n    images = tf.cast(images, tf.float16)\n    dmaps  = tf.case(images, tf.float16)\n\n  return images, dmaps, labels, sizes, slabels\n\n\n\n\ndef inputs(testset):\n  if not FLAGS.data_dir:\n    raise ValueError('Please supply a data_dir')\n  data_dir = FLAGS.data_dir\n  images, dmaps, labels, sizes, slabels = cifar10_input.inputs(testset = testset,\n\t\t\t\t       data_dir=data_dir,\n                                       batch_size=FLAGS.batch_size)\n  if FLAGS.use_fp16:\n    images = tf.cast(images, tf.float16)\n    dmaps  = tf.case(images, tf.float16)\n\n  return images, dmaps, labels, sizes, slabels\n\n\n\n\n\n\n\n\ndef inference(images, size,labels, training_nn, training_class, _reuse):\n  #\n  #\n  batch_norm_decay = 0.9\n  batch_norm_epsilon = 1e-5\n  batch_norm_scale = True\n  batch_norm_params = {\n    'is_training': training_nn,\n    'decay': batch_norm_decay,\n    'epsilon': batch_norm_epsilon,\n    'scale': batch_norm_scale,\n    'updates_collections': None, #\n  }\t\n  with arg_scope( [layers.conv2d],\n\t\t     kernel_size = 3,\n\t\t     weights_initializer = tf.random_normal_initializer(stddev=0.02),\n\t\t     biases_initializer  = 
tf.constant_initializer(0.0),\n\t\t     activation_fn=tf.nn.elu, \n\t\t     normalizer_fn=layers.batch_norm,\n\t\t     normalizer_params=batch_norm_params,\n\t\t     trainable = training_nn,\n\t\t     reuse=_reuse,\n\t\t     padding='SAME',\n\t\t     stride=1):   \n\n\n\tconv0 = layers.conv2d(images,num_outputs = 64, scope='SecondAMIN/conv0')\n\twith tf.name_scope('convBlock-1') as scope:\n          conv1  = layers.conv2d(conv0,num_outputs = 128, scope='SecondAMIN/conv1')\n          bconv1 = layers.conv2d(conv1,num_outputs = 196, scope='SecondAMIN/bconv1')\n          conv2  = layers.conv2d(bconv1, num_outputs = 128, scope='SecondAMIN/conv2')\n\t  pool1  = layers.max_pool2d(conv2, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool1')\n\t  _activation_summary(conv1)\n\t  _activation_summary(bconv1)\n\t  _activation_summary(conv2)\n\n\twith tf.name_scope('convBlock-2') as scope:\n          conv3  = layers.conv2d(pool1, num_outputs = 128, scope='SecondAMIN/conv3')\n          bconv2 = layers.conv2d(conv3, num_outputs = 196, scope='SecondAMIN/bconv2')\n          conv4  = layers.conv2d(bconv2, num_outputs = 128, scope='SecondAMIN/conv4')\n\t  pool2  = layers.max_pool2d(conv4, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool2')\n\t  _activation_summary(conv3)\n\t  _activation_summary(bconv2)\n\t  _activation_summary(conv4)\n\n\twith tf.name_scope('convBlock-3') as scope:\n          conv5  = layers.conv2d(pool2, num_outputs = 128, scope='SecondAMIN/conv5')\n          bconv3 = layers.conv2d(conv5, num_outputs = 196, scope='SecondAMIN/bconv3')\n\t  conv6  = layers.conv2d(bconv3, num_outputs = 128, scope='SecondAMIN/conv6')\n\t  pool3  = layers.avg_pool2d(conv6, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool3')\n\t  _activation_summary(conv5)\n\t  _activation_summary(bconv3)\n\t  _activation_summary(conv6)\n\n\tmap1 = tf.image.resize_images(pool1,[32,32])\n\tmap2 = 
tf.image.resize_images(pool2,[32,32])\n\tmap3 = tf.image.resize_images(pool3,[32,32])\n\t  \n        summap = tf.concat([map1, map2, map3],3)\n          \n\t# \n\twith tf.name_scope('Depth-Map-Block') as scope:\n\t  conv7 = layers.conv2d(summap, num_outputs = 128, scope='SecondAMIN/conv7')\n\t  dp1 = tf.layers.dropout(conv7,rate = 0.2, training = training_nn, name = 'SecondAMIN/dropout1')\n\t  conv8 = layers.conv2d(dp1, num_outputs = 64, scope='SecondAMIN/conv8')\n\t  _activation_summary(conv7)\n\t  _activation_summary(conv8)\n  \n\n  with arg_scope( [layers.conv2d],\n\t\t     kernel_size = 3,\n\t\t     weights_initializer = tf.random_normal_initializer(stddev=0.02),\n\t\t     biases_initializer  = tf.constant_initializer(0.0),\n\t\t     activation_fn= None, \n\t\t     normalizer_fn= None,\n\t\t     padding='SAME',\n                     trainable = training_nn,\n\t\t     reuse=_reuse,\n\t\t     stride=1):   \n\t# \n\tconv11 = layers.conv2d(conv8, num_outputs = 1, scope='SecondAMIN/conv11')\n\t_activation_summary(conv11)\n        tf.summary.image('depthMap_Second', conv11, max_outputs=FLAGS.batch_size)  \n\n\n\n\n\n  \n\t\n  with arg_scope( [layers.conv2d],\n\t\t     kernel_size = 3,\n\t\t     weights_initializer = tf.random_normal_initializer(stddev=0.02),\n\t\t     biases_initializer  = tf.constant_initializer(0.0),\n\t\t     activation_fn=tf.nn.elu, \n\t\t     normalizer_fn=layers.batch_norm,\n\t\t     normalizer_params=batch_norm_params,\n\t\t     trainable = training_nn,\n\t\t     reuse=_reuse,\n\t\t     padding='SAME',\n\t\t     stride=1):   \n \n\n\n \n\tconv0_fir = layers.conv2d(images,num_outputs = 24, scope='FirstAMIN/conv0') #\n\tpool1_fir  = layers.max_pool2d(conv0_fir, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='FirstAMIN/pool1')\n\twith tf.name_scope('convBlock-1_fir') as scope:\n          conv1_fir  = layers.conv2d(pool1_fir,num_outputs = 20, scope='FirstAMIN/conv1')#\n          bconv1_fir = layers.conv2d(conv1_fir,num_outputs = 25, 
scope='FirstAMIN/bconv1')#\n          conv2_fir  = layers.conv2d(bconv1_fir, num_outputs = 20, scope='FirstAMIN/conv2')#\n\t  \n\n\twith tf.name_scope('convBlock-2_fir') as scope:\n\t  pool2_fir  = layers.max_pool2d(conv2_fir, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='FirstAMIN/pool2')\n          conv3_fir  = layers.conv2d(pool2_fir, num_outputs = 20, scope='FirstAMIN/conv3')\n          bconv2_fir = layers.conv2d(conv3_fir, num_outputs = 25, scope='FirstAMIN/bconv2')\n          conv4_fir  = layers.conv2d(bconv2_fir, num_outputs = 20, scope='FirstAMIN/conv4')\n\t  \n\n\twith tf.name_scope('convBlock-3_fir') as scope:\n\t  pool3_fir  = layers.avg_pool2d(conv4_fir, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='FirstAMIN/pool3')\n          conv5_fir  = layers.conv2d(pool3_fir, num_outputs = 20, scope='FirstAMIN/conv5')\n          bconv3_fir = layers.conv2d(conv5_fir, num_outputs = 25, scope='FirstAMIN/bconv3')\n\t  conv6_fir  = layers.conv2d(bconv3_fir, num_outputs = 20, scope='FirstAMIN/conv6')\n\n\n\tmap1_fir = tf.image.resize_images(conv2_fir,[32,32])\n\tmap2_fir = tf.image.resize_images(conv4_fir,[32,32])\n\tmap3_fir = conv6_fir\n\t\n        summap_fir = tf.concat([map1_fir, map2_fir, map3_fir],3)\n\n\n\t#\n\twith tf.name_scope('Depth-Map-Block_fir') as scope:\n\t  conv7_fir = layers.conv2d(summap_fir, num_outputs = 28, scope='FirstAMIN/conv7')\n\t  dp1_fir = tf.layers.dropout(conv7_fir,rate = 0, training = training_nn, name = 'FirstAMIN/dropout2')\n\t  conv8_fir = layers.conv2d(dp1_fir, num_outputs =16 , scope='FirstAMIN/conv8')\n\t \n\n\n  with arg_scope( [layers.conv2d],\n\t\t     kernel_size = 3,\n\t\t     weights_initializer = tf.random_normal_initializer(stddev=0.02),\n\t\t     biases_initializer  = None, #\n\n\t\t     activation_fn= None, \n\t\t     normalizer_fn= None,\n\t\t     padding='SAME',\n\t\t     reuse=_reuse,\n\t\t     stride=1):   \n\t# \n\tconv11_fir = layers.conv2d(conv8_fir, num_outputs = 1, 
scope='FirstAMIN/conv11')\n\ttf.summary.image('ZeroOneMap', tf.cast(256*conv11_fir,tf.uint8), max_outputs=FLAGS.batch_size)  \n  \n\t\n  with arg_scope( [layers.conv2d],\n\t\t     kernel_size = 3,\n\t\t     weights_initializer = tf.random_normal_initializer(stddev=0.02),\n\t\t     biases_initializer  = tf.constant_initializer(0.0),\n\t\t     activation_fn=tf.nn.elu, \n\t\t     normalizer_fn=layers.batch_norm,\n\t\t     normalizer_params=batch_norm_params,\n\t\t     trainable = training_nn,\n\t\t     padding='SAME',\n\t\t     reuse=_reuse,\n\t\t     stride=1):   \n\n\n  \t#\n\twith tf.name_scope('Score-Map-Block09') as scope:\n\t  summap_fir = tf.image.resize_images(summap_fir,[256,256])\n\t  conv9_fir = layers.conv2d(summap_fir, num_outputs = 28, scope='FirstAMIN/conv9')\n\t  conv10_fir = layers.conv2d(conv9_fir, num_outputs = 24, scope='FirstAMIN/conv10')\n\t  #\n\n\t  conv12_fir = layers.conv2d(conv10_fir, num_outputs = 20, scope='FirstAMIN/conv12')\n\t  conv13_fir = layers.conv2d(conv12_fir, num_outputs = 20, scope='FirstAMIN/conv13')\n\t  #\n\t  conv14_fir = layers.conv2d(conv13_fir, num_outputs = 20, scope='FirstAMIN/conv14')\n\t  conv15_fir = layers.conv2d(conv14_fir, num_outputs = 16, scope='FirstAMIN/conv15')\n\t  #\n\t  conv16_fir = layers.conv2d(conv15_fir, num_outputs = 16, scope='FirstAMIN/conv16')\n\n\n\n  with arg_scope( [layers.conv2d],\n\t\t     kernel_size = 3,\n\t\t     weights_initializer = tf.random_normal_initializer(stddev=0.002),\n\t\t     biases_initializer  = None, #tf.constant_initializer(0.0),\n\n\t\t     activation_fn= None, \n\t\t     normalizer_fn= None,\n\t\t     padding='SAME',\n\t\t     reuse=_reuse,\n\t\t     stride=1): \n\t  conv17 = layers.conv2d(conv16_fir, num_outputs = 6, scope='FirstAMIN/conv17')\n\t  \n          thirdPart_comp_1 = tf.complex(conv17, tf.zeros_like(conv17))\n          thirdPart_comp_1=tf.transpose(thirdPart_comp_1, perm=[0,3,1,2])\n\n          thirdPart_fft_1=tf.abs(tf.fft2d(thirdPart_comp_1, 
name='summap_fft_real_1'))\n          thirdPart_fft_1=tf.transpose(thirdPart_fft_1, perm=[0,2,3,1])\n\t  thirdPart_fft_1=tf.log1p(thirdPart_fft_1[:,32:256-32,32:256-32,:])\n\n\n\n\n\t  #\n\t  Live_est1= images-conv17/45  \n          Live_est_mask = tf.cast(tf.greater(Live_est1,0),tf.float32)                             \n          Live_est=Live_est1*Live_est_mask\n\t  #\n\n\n\n#################################################################################################################################\n\n  with arg_scope( [layers.conv2d],\n\t\t     kernel_size = 3,\n\t\t     weights_initializer = tf.random_normal_initializer(stddev=0.02),\n\t\t     biases_initializer  = tf.constant_initializer(0.0),\n\t\t     activation_fn=tf.nn.elu, \n\t\t     normalizer_fn=layers.batch_norm,\n\t\t     normalizer_params=batch_norm_params,\n\t\t     trainable = training_nn,\n\t\t     padding='SAME',\n\t\t     reuse=_reuse,\n\t\t     stride=1):   \n \n\n  \t# Score Map Branch\n\twith tf.name_scope('Score-Map-Block1_dis') as scope:\n\t \n\t  conv9_dis = layers.conv2d(Live_est, num_outputs = 24, scope='ThirdAMIN/conv9')\n\t  conv10_dis = layers.conv2d(conv9_dis, num_outputs = 20, scope='ThirdAMIN/conv10')\n    \t  pool1_dis  = layers.max_pool2d(conv10_dis, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool1')\n\n\t  conv12_dis = layers.conv2d(pool1_dis, num_outputs = 20, scope='ThirdAMIN/conv12')\n\t  conv13_dis = layers.conv2d(conv12_dis, num_outputs = 16, scope='ThirdAMIN/conv13')\n    \t  pool2_dis  = layers.max_pool2d(conv13_dis, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool2')\n\n\t  conv14_dis = layers.conv2d(pool2_dis, num_outputs = 12, scope='ThirdAMIN/conv14')\n\t  conv15_dis = layers.conv2d(conv14_dis, num_outputs = 6, scope='ThirdAMIN/conv15')\n    \t  pool3_dis  = layers.max_pool2d(conv15_dis, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool3')\n\n\t  conv16_dis = layers.conv2d(pool3_dis, num_outputs = 1, 
scope='ThirdAMIN/conv16')\n\n\n \t  conv20_dis=tf.reshape(conv16_dis, [6,32*32])\n\t  sc333_dis  = layers.fully_connected(conv20_dis, num_outputs = 100, reuse=_reuse, scope='ThirdAMIN/bconv15_sc333_dis')\n\n\t  dp1_dis = tf.layers.dropout(sc333_dis,rate = 0.2, training = training_nn, name = 'dropout3')\n      \n\t  sc  = layers.fully_connected(dp1_dis, num_outputs = 2, reuse=_reuse,\n\t\t     weights_initializer = tf.random_normal_initializer(stddev=0.02),\n\t\t     biases_initializer  = None, #tf.constant_initializer(0.0),\n\n\t\t     activation_fn= None, \n\t\t     normalizer_fn= None,scope='ThirdAMIN/bconv10_sc')\n\n\n\t  conv9_dis2 = layers.conv2d(images, num_outputs = 24, reuse= True, scope='ThirdAMIN/conv9')\n\t  conv10_dis2 = layers.conv2d(conv9_dis2, num_outputs = 20,  reuse= True, scope='ThirdAMIN/conv10')\n    \t  pool1_dis2  = layers.max_pool2d(conv10_dis2, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool1')\n\n\t  conv12_dis2 = layers.conv2d(pool1_dis2, num_outputs = 20,reuse= True, scope='ThirdAMIN/conv12')\n\t  conv13_dis2 = layers.conv2d(conv12_dis2, num_outputs = 16, reuse= True,    scope='ThirdAMIN/conv13')\n    \t  pool2_dis2  = layers.max_pool2d(conv13_dis2, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool2')\n\n\t  conv14_dis2 = layers.conv2d(pool2_dis2, num_outputs = 12,  reuse= True, scope='ThirdAMIN/conv14')\n\t  conv15_dis2 = layers.conv2d(conv14_dis2, num_outputs = 6,  reuse= True, scope='ThirdAMIN/conv15')\n    \t  pool3_dis2  = layers.max_pool2d(conv15_dis2, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool3')\n\n\t  conv16_dis2 = layers.conv2d(pool3_dis2, num_outputs = 1,  reuse= True, scope='ThirdAMIN/conv16')\n\n\n \t  conv20_dis2=tf.reshape(conv16_dis2, [6,32*32])\n\t  sc333_dis2  = layers.fully_connected(conv20_dis2,  reuse= True, num_outputs = 100,scope='ThirdAMIN/bconv15_sc333_dis')\n\n\t  dp1_dis2 = tf.layers.dropout(sc333_dis2,rate = 0.2, training = training_nn, name = 
'dropout4')\n      \n\t  sc2  = layers.fully_connected(dp1_dis2, num_outputs = 2,  reuse= True, \n\t\t     weights_initializer = tf.random_normal_initializer(stddev=0.02),\n\t\t     biases_initializer  = None, #tf.constant_initializer(0.0),\n\n\t\t     activation_fn= None, \n\t\t     normalizer_fn= None,scope='ThirdAMIN/bconv10_sc')\n##################################################################################################################################\n\n  batch_norm_decay = 0.9\n  batch_norm_epsilon = 1e-5\n  batch_norm_scale = True\n  batch_norm_params = { \n    'is_training': False,\n    'decay': batch_norm_decay,\n    'epsilon': batch_norm_epsilon,\n    'scale': batch_norm_scale,\n    'updates_collections': None, #\n    'trainable':False,\n    #'reuse':True\n  }\t\n  with arg_scope( [layers.conv2d],\n\t\t     kernel_size = 3,\n\t\t     weights_initializer = tf.random_normal_initializer(stddev=0.02),\n\t\t     biases_initializer  = tf.constant_initializer(0.0),\n\t\t     activation_fn=tf.nn.elu, \n\t\t     normalizer_fn=layers.batch_norm,\n\t\t     normalizer_params=batch_norm_params,\n\t\t     trainable = False,\n\t\t     padding='SAME',\n\t\t     reuse=True,\n\t\t     stride=1): \n #################################################################################################################################\n\n\tconv0_new = layers.conv2d(Live_est,num_outputs = 64, scope='SecondAMIN/conv0')\n\twith tf.name_scope('convBlock-1_new') as scope:\n          conv1_new  = layers.conv2d(conv0_new,num_outputs = 128, scope='SecondAMIN/conv1')\n          bconv1_new = layers.conv2d(conv1_new,num_outputs = 196, scope='SecondAMIN/bconv1')\n          conv2_new  = layers.conv2d(bconv1_new, num_outputs = 128, scope='SecondAMIN/conv2')\n\t  pool1_new  = layers.max_pool2d(conv2_new, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool1')\n\n\twith tf.name_scope('convBlock-2_new') as scope:\n          conv3_new  = layers.conv2d(pool1_new, 
num_outputs = 128, scope='SecondAMIN/conv3')\n          bconv2_new = layers.conv2d(conv3_new, num_outputs = 196, scope='SecondAMIN/bconv2')\n          conv4_new  = layers.conv2d(bconv2_new, num_outputs = 128, scope='SecondAMIN/conv4')\n\t  pool2_new  = layers.max_pool2d(conv4_new, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool2')\n\n\twith tf.name_scope('convBlock-3_new') as scope:\n          conv5_new  = layers.conv2d(pool2_new, num_outputs = 128, scope='SecondAMIN/conv5')\n          bconv3_new = layers.conv2d(conv5_new, num_outputs = 196, scope='SecondAMIN/bconv3')\n\t  conv6_new  = layers.conv2d(bconv3_new, num_outputs = 128, scope='SecondAMIN/conv6')\n\t  pool3_new  = layers.avg_pool2d(conv6_new, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool3')\n\n\tmap1_new = tf.image.resize_images(pool1_new,[32,32])\n\tmap2_new = tf.image.resize_images(pool2_new,[32,32])\n\tmap3_new = tf.image.resize_images(pool3_new,[32,32])\n\t  \n        summap_new = tf.concat([map1_new, map2_new, map3_new],3)\n          \n\t# Depth Map Branch\n\twith tf.name_scope('Depth-Map-Block_new') as scope:\n\t  conv7_new = layers.conv2d(summap_new, num_outputs = 128, scope='SecondAMIN/conv7')\n\t  dp1_new = tf.layers.dropout(conv7_new,rate = 0.2, training = training_nn, name = 'SecondAMIN/dropout1')\n\t  conv8_new = layers.conv2d(dp1_new, num_outputs = 64, scope='SecondAMIN/conv8')\n  \n\n  with arg_scope( [layers.conv2d],\n\t\t     kernel_size = 3,\n\t\t     weights_initializer = tf.random_normal_initializer(stddev=0.02),\n\t\t     biases_initializer  = tf.constant_initializer(0.0),\n\t\t     activation_fn= None, \n\t\t     normalizer_fn= None,\n\t\t     padding='SAME',\n                     trainable = False,\n\t\t     reuse=True,\n\t\t     stride=1):   \n\t# Depth Map Branch\n\tconv11_new = layers.conv2d(conv8_new, num_outputs = 1, scope='SecondAMIN/conv11')\n\n\n\n\n\n\n        label_Amin1=size\n        
LabelsWholeImage=tf.cast(np.ones([6,32,32,1]), tf.float32)\n        LabelsWholeImage2=LabelsWholeImage*tf.reshape(tf.cast(1-label_Amin1,tf.float32),[6,1,1,1])\n        LabelsWholeImage=labels*tf.reshape(tf.cast(label_Amin1,tf.float32),[6,1,1,1])\n\n\tZ_GT2=np.zeros([6,3,3,1])\n\tZ_GT2[:,1,1,:]=1\n\tGT2=tf.cast(Z_GT2, tf.float32)\n\n\n\ttf.summary.image('GT2', LabelsWholeImage[:,:,:,0:1], max_outputs=FLAGS.batch_size) \n        tf.summary.image('SC', tf.cast(256*conv11[:,:,:,0:1],tf.uint8), max_outputs=FLAGS.batch_size) \n\n        tf.summary.image('Live_SC', tf.cast(256*conv11_new[:,:,:,0:1],tf.uint8), max_outputs=FLAGS.batch_size) \n        tf.summary.image('Live', tf.cast(256*Live_est[:,:,:,3:6],tf.uint8), max_outputs=FLAGS.batch_size) \n        tf.summary.image('inputImage', tf.cast(256*images[:,:,:,3:6],tf.uint8), max_outputs=FLAGS.batch_size) \n\ttf.summary.image('GT3_Artifact', LabelsWholeImage2[:,:,:,0:1], max_outputs=FLAGS.batch_size) \n        tf.summary.image('Artifact', conv17[:,:,:,3:6], max_outputs=FLAGS.batch_size)\n  return Live_est, conv17, conv11, GT2,conv17,images,thirdPart_fft_1,LabelsWholeImage, conv11_new,conv11_new  , LabelsWholeImage2, sc, sc2, conv11_fir\n\n # \n\n\n\n\ndef lossSecond(dmaps, smaps, labels, slabels, sc,GT2, fftmapA, A, B,bin_labels, bin_labels2,Nsc,Lsc,sc_fake, sc_real):\n  # \n  with tf.name_scope('DR_Net_Training') as scope:\n    mean_squared_loss = tf.reduce_mean(\n                                tf.reduce_mean(tf.abs(tf.subtract(sc,bin_labels2)),\n                                               reduction_indices = 2),\n                             reduction_indices = 1) \n    loss2 = tf.reduce_mean(mean_squared_loss, name='pixel_loss1')*1\n    tf.summary.scalar('Loss',loss2)\n    tf.add_to_collection('losses', loss2)\n\n\n\n\n\n\n  return tf.add_n(tf.get_collection('losses'), name='total_loss')\n\n\n\n\n\ndef lossThird(dmaps, smaps, labels, slabels, sc,GT2, fftmapA, A, B,bin_labels, bin_labels2,Nsc,Lsc,Allsc,sc_fake, 
sc_real):\n\n\n\n  with tf.name_scope('GAN_Training') as scope:\n    bin_labels3=tf.ones([6,1])\n    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n                    labels=tf.reshape(tf.cast(bin_labels3,tf.int32),[-1]), logits= tf.cast(sc_fake, tf.float32), \n\t\t    name='cross_entropy_per_example') # logits = (N,2)  label = (N,) tf.reshape(label,[-1])\n\n\n    loss22 = tf.reduce_mean(cross_entropy, name='classification_loss2')*1\n    tf.add_to_collection('losses', loss22)\n\n    bin_labels3=tf.zeros([6,1])\n    bin_labels_1=tf.cast(sc_real, tf.float32)*tf.cast(bin_labels,tf.float32)\n    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n                    labels=tf.reshape(tf.cast(bin_labels3,tf.int32),[-1]), logits= bin_labels_1, \n\t\t    name='cross_entropy_per_example2') # logits = (N,2)  label = (N,) tf.reshape(label,[-1])\n\n    loss23 = tf.reduce_mean(cross_entropy, name='classification_loss3')*1\n    tf.summary.scalar('Loss',loss23+loss22)\n    tf.add_to_collection('losses', loss23)\n\n\n  return tf.add_n(tf.get_collection('losses'), name='total_loss')\n\n\ndef lossFirst(dmaps, smaps, labels, slabels, sc,GT2, fftmapA, A, B,bin_labels, bin_labels2,Nsc,Lsc,Allsc,sc_fake, sc_real, conv11_fir):\n\n\n\n  with tf.name_scope('Zero_One_Map_loss') as scope:\n\n    mean_squared_loss = tf.reduce_mean(\n                                tf.reduce_mean(((tf.abs(tf.subtract(Allsc,conv11_fir)))),\n                                               reduction_indices = 2),\n                             reduction_indices = 1)\n\n    loss823 = tf.reduce_mean(mean_squared_loss, name='pixel_loss823')*6000\n    tf.summary.scalar('Loss',loss823)\n    tf.add_to_collection('losses', loss823)\n\n\n\n\n  with tf.name_scope('Dr_Net_Backpropagate') as scope:\n    bin_labels23=labels #tf.zeros_like(bin_labels2)\n    mean_squared_loss = tf.reduce_mean(\n                                tf.reduce_mean(tf.abs(tf.subtract(Lsc,bin_labels23)),\n                 
                              reduction_indices = 2),\n                             reduction_indices = 1) \n    loss32 = tf.reduce_mean(mean_squared_loss, name='pixel_loss32')*600\n    tf.summary.scalar('Loss',loss32)\n    tf.add_to_collection('losses', loss32)\n\n\n\n  with tf.name_scope('GAN_Backpropagate') as scope:\n    bin_labelsE=tf.zeros([6,1])\n    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n                    labels=tf.reshape(tf.cast(bin_labelsE,tf.int32),[-1]), logits= tf.cast(sc_fake, tf.float32), \n\t\t    name='cross_entropy_per_example')\n\n\n    loss22 = tf.reduce_mean(cross_entropy, name='classification_loss2')*1*100\n    tf.summary.scalar('Loss',loss22)\n    tf.add_to_collection('losses', loss22)\n\n\n  with tf.name_scope('Live_Repetitive_Pattern') as scope:\n\n    mean_squared_loss = tf.reduce_max(\n                                tf.reduce_max(B,\n                                               reduction_indices = 2),\n                             reduction_indices = 1)\n    \n    #\n    bin_labels_1=tf.cast(bin_labels,tf.float32)\n    bin_labels9= tf.concat([bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1],1)\n\n\n    mean_squared_loss=mean_squared_loss*(bin_labels9)\n\n    loss81= tf.reduce_mean(mean_squared_loss, name='pixel_loss81')*1\n    tf.summary.scalar('Loss',loss81)\n    tf.add_to_collection('losses', loss81)\n\n\n\n\n  with tf.name_scope('Spoof_Repetitive_Pattern') as scope:\n\n    mean_squared_loss = tf.reduce_max(\n                                tf.reduce_max(B,\n                                               reduction_indices = 2),\n                             reduction_indices = 1)\n    \n    bin_labels_1=tf.cast(1-bin_labels,tf.float32)\n    bin_labels9= tf.concat([bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1],1)\n\n\n    mean_squared_loss2=mean_squared_loss*(bin_labels9)\n    \n    mean_squared_loss=-mean_squared_loss2#\n    loss812= 
tf.reduce_mean(mean_squared_loss, name='pixel_loss812')*1*2\n    tf.summary.scalar('Loss',loss812)\n    tf.add_to_collection('losses', loss812)\n\n\n\n\n\n  with tf.name_scope('Live_Images_Estimation') as scope:\n\n    mean_squared_loss = tf.reduce_mean(\n                                tf.reduce_mean(((tf.abs(tf.subtract(A,dmaps)))),\n                                               reduction_indices = 2),\n                             reduction_indices = 1)\n    bin_labels_1=tf.cast(bin_labels,tf.float32)\n    bin_labels8= tf.concat([bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1],1)\n\n    mean_squared_loss=mean_squared_loss*(bin_labels8)\n\n    loss8 = tf.reduce_mean(mean_squared_loss, name='pixel_loss8')*150*300\n    tf.summary.scalar('Loss',loss8)\n\n    tf.add_to_collection('losses', loss8)\n\n\n  with tf.name_scope('Live_Noise') as scope:\n    AllscZero = tf.cast(np.zeros([6,256,256,6]), tf.float32)\n    mean_squared_loss = tf.reduce_mean(\n                                tf.reduce_mean(((tf.abs(tf.subtract(AllscZero,smaps)))),#\n                                               reduction_indices = 2),\n                             reduction_indices = 1) \n    bin_labels_1=tf.cast(bin_labels,tf.float32)\n    bin_labels9= tf.concat([bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1],1)\n\n\n    mean_squared_loss=mean_squared_loss*(bin_labels9)#\n\n    loss9 = tf.reduce_mean(mean_squared_loss, name='pixel_loss9')*100*5\n    tf.summary.scalar('Loss',loss9)\n    tf.add_to_collection('losses', loss9)\n\n\n  with tf.name_scope('Spoof_Noise') as scope:\n\n    AllscOnes = tf.cast(tf.less(tf.abs(smaps),0.04),tf.float32)  #\n    mean_squared_loss = tf.reduce_mean(\n                                tf.reduce_mean(((tf.abs(smaps))),#\n                                               reduction_indices = 2),\n                             reduction_indices = 1) \n\n    bin_labels_1=tf.cast(1-bin_labels,tf.float32)\n    
bin_labels9= tf.concat([bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1],1)\n\n    mean_squared_loss2=mean_squared_loss*(bin_labels9)#\n\n    mean_squared_loss=tf.abs(mean_squared_loss2 -0.2) #\n\n\n    loss10 = tf.reduce_mean(mean_squared_loss, name='pixel_loss19')*10*3\n    tf.summary.scalar('Loss',loss10)\n    tf.add_to_collection('losses', loss10)\n\n\n\n\n\n  return tf.add_n(tf.get_collection('losses'), name='total_loss')\n\n\n\n\n\ndef _add_loss_summaries(total_loss):\n  \"\"\"\n  \"\"\"\n  # \n  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n  losses = tf.get_collection('losses')\n  loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n  for l in losses + [total_loss]:\n    # \n    # \n    tf.summary.scalar(l.op.name + ' (raw)', l)\n    tf.summary.scalar(l.op.name, loss_averages.average(l))\n\n  return loss_averages_op\n\n\ndef train(total_loss, global_step, varName1):\n  \"\"\"\n  \"\"\"\n  # Variables that affect learning rate.\n  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size\n  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)\n\n  # Decay the learning rate exponentially based on the number of steps.\n  lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,\n                                  global_step,\n                                  decay_steps,\n                                  LEARNING_RATE_DECAY_FACTOR,\n                                  staircase=True)\n  tf.summary.scalar('learning_rate', lr)\n  \n  # Generate moving averages of all losses and associated summaries.\n  loss_averages_op = _add_loss_summaries(total_loss)\n\n  # Compute gradients.\n  with tf.control_dependencies([loss_averages_op]):\n    #opt = tf.train.GradientDescentOptimizer(lr)\n    opt = tf.train.AdamOptimizer(lr)\n\n    first_train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,varName1)\n    #\n    grads = 
opt.compute_gradients(total_loss,first_train_vars)\n#####################################################################################################################\n  # Apply gradients.\n  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n  # Add histograms for trainable variables.\n  for var in tf.trainable_variables():\n    tf.summary.histogram(var.op.name, var)\n\n  \n\n  # Track the moving averages of all trainable variables.\n  with tf.name_scope('TRAIN') as scope:\n    variable_averages = tf.train.ExponentialMovingAverage(\n        MOVING_AVERAGE_DECAY, global_step)\n    variables_averages_op = variable_averages.apply(first_train_vars)#tf.trainable_variables())\n\n  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):\n    train_op = tf.no_op(name='train')\n\n  return train_op\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2019 Yaojie Liu\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# Face De-Spoofing: Anti-Spoofing via Noise Modeling\nAmin Jourabloo*, Yaojie Liu*, Xiaoming Liu\n\n![alt text](http://www.cse.msu.edu/~liuyaoj1/images/caption_eccv18_git.png)\n\n## Setup\nInstall TensorFlow >=1.1, <2.0.\n\nThe source code files:\n   1. \"Architecture.py\": Contains the architectures and the definitions of the loss functions.\n   2. \"data_train.py\"  : Contains the functions for reading the training data.\n   3. \"Train.py\"       : The main training file that reads the training data, computes the loss functions and backpropagates the error.\n   4. \"facepad-test.py\": It performs the testing on the test videos and generates the score for each frame.\n\n## Training\nTo run the training code:\nsource ~/tensorflow/bin/activate\npython /data/train_demo/code/Train.py\ndeactivate\n\n## Testing\nTo run the testing code on a test video (\"Test_video.avi\"):\n1. python facepad-test.py -input Test_video.avi -isVideo 1\n2. It will generate a txt file in the Score folder which contains the score for each frame.\n\n## Acknowledgment\nPlease cite the papers:\n\n    @inproceedings{eccv18jourabloo,\n        title={Face De-Spoofing: Anti-Spoofing via Noise Modeling},\n        author={Amin Jourabloo*, Yaojie Liu*, Xiaoming Liu},\n        booktitle={In Proceedings of European Conference on Computer Vision (ECCV 2018)},\n        address={Munich, Germany},\n        year={2018}\n    }\n    \n    @inproceedings{cvpr18liu,\n        title={Learning Deep Models for Face Anti-Spoofing: Binary or Auxiliary Supervision},\n        author={Yaojie Liu*, Amin Jourabloo*, Xiaoming Liu},\n        booktitle={In Proceedings of IEEE Computer Vision and Pattern Recognition (CVPR 2018)},\n        address={Salt Lake City, UT},\n        year={2018}\n    }\n\nIf you have any questions, please contact: [Amin Jourabloo](amin.jourabloo@gmail.com) \n   \n"
  },
  {
    "path": "Train.py",
    "content": "\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport time\n\nimport tensorflow as tf\n\nimport Architecture\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('train_dir', '/research/cvl-liuyaoj1/tensorflow/model/ECCV2018/Oulu/P1', \"\"\"Directory where to write event logs and checkpoint.\"\"\")\ntf.app.flags.DEFINE_string('eval_data', 'train_eval',\n                           \"\"\"Either 'test' or 'train_eval'.\"\"\")\ntf.app.flags.DEFINE_integer('max_steps', 2000000,\n                            \"\"\"Number of batches to run.\"\"\")\ntf.app.flags.DEFINE_boolean('log_device_placement', False,\n                            \"\"\"Whether to log device placement.\"\"\")\ntf.app.flags.DEFINE_integer('log_frequency', 10,\n                            \"\"\"How often to log results to the console.\"\"\")\ntf.app.flags.DEFINE_string('gpu', '0',\n                           \"\"\"GPU to use [1].\"\"\")\n\ndef train():\n  \"\"\"Train CIFAR-10 for a number of steps.\"\"\"\n  with tf.Graph().as_default() as g:\n    global_step = tf.contrib.framework.get_or_create_global_step()\n\n    # Get images and labels for CIFAR-10.\n    with tf.name_scope('Input') as scope:\n      images, labels, _, sizes, slabels = cifar10.distorted_inputsB(1)\n      labels = tf.image.resize_images(labels,[32, 32])\n    print(images)\n    print(labels)\n    \n    # Build a Graph that computes the logits predictions from the\n    # inference model.\n\n    dmaps, smaps, sc, dmaps_1, smaps_1, A, B,bin_labels, Nsc, Lsc, Allsc,sc_fake, sc_real, conv11_fir = cifar10.inference(images, sizes, labels, training_nn = True, training_class = True , _reuse= False)\n \n    print(smaps)\n    print(sc)\n    Label_Amin=sizes\n    # Calculate loss.\n    loss1= cifar10.lossSecond(dmaps, smaps, labels, slabels, sc, dmaps_1, smaps_1, A, B ,Label_Amin,bin_labels, Nsc, Lsc,sc_fake, sc_real)\n\n    
print(loss1)\n\n    # Build a Graph that trains the model with one batch of examples and\n    # updates the model parameters.\n    train_opS = cifar10.train(loss1, global_step,\"SecondAMIN\")\n\n\n    dmaps, smaps, sc, dmaps_1, smaps_1, A, B,bin_labels, Nsc, Lsc, Allsc,sc_fake, sc_real, conv11_fir = cifar10.inference(images, sizes, labels, training_nn = True, training_class = True , _reuse= True)\n\n    loss3= cifar10.lossThird(dmaps, smaps, labels, slabels, sc, dmaps_1, smaps_1, A, B ,Label_Amin,bin_labels, Nsc, Lsc, Allsc,sc_fake, sc_real)\n\n    # Build a Graph that trains the model with one batch of examples and\n    # updates the model parameters.\n    train_opT = cifar10.train(loss3, global_step,\"ThirdAMIN\")\n\n####################################################################################################################################\n    dmaps, smaps, sc, dmaps_1, smaps_1, A, B,bin_labels, Nsc, Lsc, Allsc,sc_fake, sc_real, conv11_fir = cifar10.inference(images, sizes, labels, training_nn = True, training_class = True , _reuse= True)\n\n    loss2= cifar10.lossFirst(dmaps, smaps, labels, slabels, sc, dmaps_1, smaps_1, A, B ,Label_Amin,bin_labels, Nsc, Lsc, Allsc,sc_fake, sc_real,conv11_fir)\n\n    print(loss2)\n\n    \n    train_opF = cifar10.train(loss2, global_step,\"FirstAMIN\")# FirstAMIN\n\n    loss= loss1+ loss2 + loss3\n  \n    \n    # Build the summary operation based on the TF collection of Summaries.\n    summary_op = tf.summary.merge_all()\n    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, g)\n\n    class _LoggerHook(tf.train.SessionRunHook):\n      \"\"\"Logs loss and runtime.\"\"\"\n\n      def begin(self):\n        self._step = -1\n        self._start_time = time.time()\n\n      def before_run(self, run_context):\n        self._step += 1\n        return tf.train.SessionRunArgs(loss)  # Asks for loss value.\n\n      def after_run(self, run_context, run_values):\n        if self._step % FLAGS.log_frequency == 0:\n          
current_time = time.time()\n          duration = current_time - self._start_time\n          self._start_time = current_time\n\n          loss_value = run_values.results\n          examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration\n          sec_per_batch = float(duration / FLAGS.log_frequency)\n\n          format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '\n                        'sec/batch)')\n          print (format_str % (datetime.now(), self._step, loss_value,\n                               examples_per_sec, sec_per_batch))\n\n    i = 71000\n    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8, visible_device_list =FLAGS.gpu)\n    with tf.train.MonitoredTrainingSession(\n        checkpoint_dir=FLAGS.train_dir,\n        hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),\n               tf.train.NanTensorHook(loss),\n               _LoggerHook()],\n        config=tf.ConfigProto(\n            log_device_placement=FLAGS.log_device_placement, gpu_options = gpu_options),save_checkpoint_secs=240\n) as mon_sess:\n      while not mon_sess.should_stop():        \n\tif i % 100 == 1:\n\t    _, summary = mon_sess.run([train_opS, summary_op])\n\t    _, summary = mon_sess.run([train_opT, summary_op])\n\t    _, summary = mon_sess.run([train_opF, summary_op])\n\t    summary_writer.add_summary(summary, i)\n\telse:\n\t    mon_sess.run(train_opS)\n\t    mon_sess.run(train_opT)\n\t    mon_sess.run(train_opF)\n\ti += 1\n\n\ndef main(argv=None):  # pylint: disable=unused-argument\n  if tf.gfile.Exists(FLAGS.train_dir):\n     tf.gfile.MakeDirs(FLAGS.train_dir)\n  #  tf.gfile.DeleteRecursively(FLAGS.train_dir)\n  train()\n\n\nif __name__ == '__main__':\n  tf.app.run()\n"
  },
  {
    "path": "data_train.py",
    "content": "\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom six.moves import xrange  # pylint: disable=redefined-builtin\nimport tensorflow as tf\nimport numpy.random\n\n\nIMAGE_SIZE = 256\nMAP_SIZE = 64\n\n# Global constants describing the CIFAR-10 data set.\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 40000\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 62957\n\n\ndef readFromFile(filename_queue): #,metaname_queue):\n\n  class DataRecord(object):\n    pass\n  result = DataRecord()\n\n  # Count the bytes for each sample\n  result.height = 256\n  result.width = 256\n  result.depth = 3\n  result.dmap_height = 64\n  result.dmap_width = 64\n  dmap_bytes = result.dmap_height * result.dmap_width\n  image_bytes = result.height * result.width * result.depth\n  record_bytes = dmap_bytes + image_bytes + 1\n  # \n\n  # Read a record\n  data_reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n  result.key, data_value = data_reader.read(filename_queue)\n  #\n  \n\n  # Convert from a string to a vector of uint8 that is record_bytes long.\n  data_in_bytes = tf.decode_raw(data_value, tf.uint8)\n  #meta_in_bytes = tf.decode_raw(meta_value, tf.int64)\n\n\n  # \n  img = tf.reshape(\n      tf.strided_slice(data_in_bytes, [0],\n                       [0 + image_bytes]),\n      [result.depth, result.height, result.width])\n  result.image = tf.cast(tf.transpose(img, [1, 2, 0]), tf.float32) / 256\n\n  # \n  dmap = tf.reshape(\n      tf.strided_slice(data_in_bytes, [image_bytes], \n\t\t       [image_bytes + dmap_bytes]),\n      [1, result.dmap_height, result.dmap_width])\n  result.dmap = tf.cast(tf.transpose(dmap, [1, 2, 0]), tf.float32) / 256 \n\n  result.label = tf.cast(\n      tf.strided_slice(data_in_bytes, [image_bytes + dmap_bytes], [image_bytes + dmap_bytes + 1]), tf.int32)\n \n  return result\n\n\ndef _generate_image_and_label_batch(image, dmap, label, min_queue_examples,\n                           
         batch_size, shuffle):\n  \n  num_preprocess_threads = 16\n  if shuffle:\n    images, dmaps, labels = tf.train.shuffle_batch(\n        [image, dmap, label],\n        batch_size=batch_size,\n        num_threads=num_preprocess_threads,\n        capacity=min_queue_examples + 3 * batch_size,\n\tallow_smaller_final_batch=False,\n        min_after_dequeue=min_queue_examples)\n  else:\n    images, dmaps, labels = tf.train.batch(\n        [image, dmap, label],\n        batch_size=batch_size,\n        num_threads=num_preprocess_threads,\n\tallow_smaller_final_batch=False,\n        capacity=min_queue_examples + 3 * batch_size)\n        \n  irgb, ihsv = tf.split(images, num_or_size_splits=2, axis=3)\n  # Display the training images in the visualizer.\n  tf.summary.image('input1', irgb)  \n  tf.summary.image('input2', ihsv)  \n  tf.summary.image('input3', dmaps)  \n \n  return images, dmaps, labels, labels, labels\n\ndef distorted_inputs(data_dir, batch_size):\n              \n  filenames11 = [os.path.join(data_dir, '/research/cvlshare/Databases/Oulu/bin/1s/train_%d.dat' % i)\n               \tfor i in xrange(1,400)]\n  filenames = filenames11 \n\n  metanames11 = [os.path.join(data_dir, '/data/train_demo/bin1/train_meta_%d.dat' % i)\n               \t     for i in xrange(1,200)]\n  metanames12 = [os.path.join(data_dir, '/data/train_demo/bin2/train_meta_%d.dat' % i)\n               \t     for i in xrange(1,200)]\n\n  metanames = metanames11 + metanames12 #+ metanames13 + metanames2 #+ metanames21\n\n  names = list(zip(filenames, metanames))\n  numpy.random.shuffle(names)\n  filenames, metanames = zip(*names)\n  \n  num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\n\n  for f in filenames:\n    if not tf.gfile.Exists(f):\n      raise ValueError('Failed to find file: ' + f)\n\n  # Create a queue that produces the filenames to read.\n  filename_queue = tf.train.string_input_producer(filenames)\n  #metaname_queue = tf.train.string_input_producer(metanames)\n \n  # 
Read examples from files in the filename queue.\n  read_input = readFromFile(filename_queue)#, metaname_queue)\n\n  height = IMAGE_SIZE\n  width = IMAGE_SIZE\n\n  # data augmentation\n  distorted_image = read_input.image\n  #distorted_image = tf.image.random_flip_left_right(distorted_image)\n  hsv_image = tf.image.rgb_to_hsv(distorted_image)\n\n  float_image = tf.concat([hsv_image,distorted_image],axis = 2)\n  #\n  float_image.set_shape([height, width, 6])\n  read_input.dmap.set_shape([MAP_SIZE, MAP_SIZE, 1])\n  read_input.label.set_shape([1])\n  \n  \n  # Ensure that the random shuffling has good mixing properties.\n  min_fraction_of_examples_in_queue = 0.1\n  min_queue_examples = int(num_examples_per_epoch * min_fraction_of_examples_in_queue)\n\n  print ('Filling queue with %d CASIA AntiSpoofing images before starting to train. '\n         'This will take a few minutes.' % min_queue_examples)\n  # Generate a batch of images and labels by building up a queue of examples.\n  return _generate_image_and_label_batch(float_image, read_input.dmap, read_input.label,\n                                         min_queue_examples, batch_size,\n                                         shuffle=True)\n\n\n\ndef distorted_inputsA(data_dir, batch_size):\n              \n  filenames21 = [os.path.join(data_dir, '/data/train_demo/bin4/train_%d.dat' % i)\n                for i in xrange(1,20)]\n\n  filenames = filenames21 \n\n  metanames21 = [os.path.join(data_dir, '/data/train_demo/mix/train_meta_%d.dat' % i)\n                for i in xrange(1,20)]\n  metanames = metanames21\n\n  names = list(zip(filenames, metanames))\n  numpy.random.shuffle(names)\n  filenames, metanames = zip(*names)\n  \n  num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\n\n  for f in filenames:\n    if not tf.gfile.Exists(f):\n      raise ValueError('Failed to find file: ' + f)\n\n  # Create a queue that produces the filenames to read.\n  filename_queue = tf.train.string_input_producer(filenames)\n  
metaname_queue = tf.train.string_input_producer(metanames)\n \n  # Read examples from files in the filename queue.\n  read_input = readFromFile(filename_queue, metaname_queue)\n\n  height = IMAGE_SIZE\n  width = IMAGE_SIZE\n\n  # data augmentation\n  distorted_image = read_input.image\n  #distorted_image = tf.image.random_flip_left_right(distorted_image)\n  hsv_image = tf.image.rgb_to_hsv(distorted_image)\n\n  float_image = tf.concat([hsv_image,distorted_image],axis = 2)\n  #float_image = distorted_image\n  # Set the shapes of tensors.\n  float_image.set_shape([height, width, 6])\n  read_input.dmap.set_shape([MAP_SIZE, MAP_SIZE, 1])\n  read_input.label.set_shape([1])\n  \n  # Ensure that the random shuffling has good mixing properties.\n  min_fraction_of_examples_in_queue = 0.1\n  min_queue_examples = int(num_examples_per_epoch * min_fraction_of_examples_in_queue)\n\n  print ('Filling queue with %d CASIA AntiSpoofing images before starting to train. '\n         'This will take a few minutes.' 
% min_queue_examples)\n  # Generate a batch of images and labels by building up a queue of examples.\n  return _generate_image_and_label_batch(float_image, read_input.dmap, read_input.label, read_input.size, read_input.slabel,\n                                         min_queue_examples, batch_size,\n                                         shuffle=True)\n\n\ndef inputs(testset, data_dir, batch_size):\n  if testset == 1:\n  \tfilenames = [os.path.join(data_dir, 'CASIA-FASD/CASIA_test_%d.dat' % i)\n               \t     for i in xrange(1,11)]\n\tmetanames = [os.path.join(data_dir, 'CASIA-FASD/CASIA_test_meta_%d.dat' % i)\n               \t     for i in xrange(1,11)]\n  elif testset == 2:\n\tfilenames1 = [os.path.join(data_dir, 'CASIA-FASD/CASIA_train_%d.dat' % i)\n               for i in xrange(1,11)]\n  \tfilenames2 = [os.path.join(data_dir, 'New_DataSet/BONUS6_train_%d.dat' % i)\n               for i in xrange(1,11)]\n  \tfilenames = filenames1\n\n\tmetanames1 = [os.path.join(data_dir, 'CASIA-FASD/CASIA_train_meta_%d.dat' % i)\n               \t      for i in xrange(1,11)]\n  \tmetanames2 = [os.path.join(data_dir, 'New_DataSet/BONUS6_train_meta_%d.dat' % i)\n               \t      for i in xrange(1,11)]\n  \tmetanames = metanames1\n  elif testset == 3:\n\tfilenames = [os.path.join(data_dir, 'REPLAY-ATTACK/REPLAY-ATTACK/IDIAP128_test_%d.dat' % i)\n               \t     for i in xrange(1,11)]\n\tmetanames = [os.path.join(data_dir, 'REPLAY-ATTACK/REPLAY-ATTACK/IDIAP128_test_meta_%d.dat' % i)\n               \t     for i in xrange(1,11)]\n  else:\n\tfilenames = [os.path.join(data_dir, 'REPLAY-ATTACK/REPLAY-ATTACK/IDIAP128_test_%d.dat' % i)\n               \t     for i in xrange(1,11)]\n\tmetanames = [os.path.join(data_dir, 'REPLAY-ATTACK/REPLAY-ATTACK/IDIAP128_train_meta_%d.dat' % i)\n               \t     for i in xrange(1,11)]\n\n  num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n\n  for f in filenames:\n    if not tf.gfile.Exists(f):\n      raise 
ValueError('Failed to find file: ' + f)\n\n  # Create a queue that produces the filenames to read.\n  filename_queue = tf.train.string_input_producer(filenames)\n  metaname_queue = tf.train.string_input_producer(metanames)\n \n  # Read examples from files in the filename queue.\n  read_input = readFromFile(filename_queue, metaname_queue)\n\n  height = IMAGE_SIZE\n  width = IMAGE_SIZE\n\n    distorted_image = read_input.image\n  hsv_image = tf.image.rgb_to_hsv(distorted_image)\n\n  float_image = tf.concat([hsv_image,distorted_image],axis = 2)\n  # float_image = distorted_image\n  # Set the shapes of tensors.\n  float_image.set_shape([height, width, 6])\n  read_input.dmap.set_shape([MAP_SIZE, MAP_SIZE, 1])\n  read_input.label.set_shape([1])\n  read_input.size.set_shape([1])\n  read_input.slabel.set_shape([1])\n\n  # Ensure that the random shuffling has good mixing properties.\n  min_fraction_of_examples_in_queue = 0.05\n  min_queue_examples = int(num_examples_per_epoch *\n                           min_fraction_of_examples_in_queue)\n\n  print ('Filling queue with %d CASIA AntiSpoofing images before starting to test. '\n         'This will take a few minutes.' % min_queue_examples)\n  # Generate a batch of images and labels by building up a queue of examples.\n  return _generate_image_and_label_batch(float_image, read_input.dmap, read_input.label, read_input.size, read_input.slabel,\n                                         min_queue_examples, batch_size,\n                                         shuffle=False)\n"
  },
  {
    "path": "facepad-test.py",
    "content": "# Copyright 2018\n# \n# Yaojie Liu, Amin Jourabloo, Xiaoming Liu, Michigan State University\n# \n# All Rights Reserved.\n# \n# This research is based upon work supported by the Office of the Director of \n# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity\n# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and \n# conclusions contained herein are those of the authors and should not be \n# interpreted as necessarily representing the official policies or endorsements,\n# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The \n# U.S. Government is authorized to reproduce and distribute reprints for \n# Governmental purposes not withstanding any copyright annotation thereon. \n# ==============================================================================\n#\n# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n''' Tutorial code to use facePAD model\nThis tutorial can test the face anti-spoofing system on both video and image \nfiles. \n\nExamples:\n\tpython facepad-test.py -input ./examples/ex1.mov -isVideo 1 \n     \tpython facepad-test.py -input ./examples/ex1.jpg -isVideo 0\n\nModel Input: \n    image: Cropped face in RGB. Ideal size should be larger than 256*256\n    \nModel Output:\n    score: liveness score, range [-1,1]. 
Higher score (--> 1) denotes spoofness.\n    \nOther usage:\n    Pretrained model can also deploy via Tensorflow Serving. The instruction of \nTensorflow Serving can be found at:\n\nhttps://www.tensorflow.org/serving/serving_basic\n\nThe signature of the model is:\n\n    inputs  = {'images': facepad_inputs}\n    outputs = {'depths': facepad_output_depth,\n               'scores': facepad_output_scores}\n'''\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nimport sys\nimport os\nimport time\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n# face detector && pad set-up\nfaced_dir = './haarcascade_frontalface_alt.xml'\nexport_dir = './lib'\nfaceCascade = cv2.CascadeClassifier(faced_dir)\n\n# Basic model parameters.\nIMAGE_SIZE = 256 #input image size\n\n# name_scope\ninputname = \"input:0\"\noutputname = \"Mean_2:0\"#SecondAmin/\n\ndef facePAD_API(image):\n  '''\n    API Input: \n        image: Cropped face in RGB at any size. Ideally, image is larger than \n        256*256 and the dtype is uint8\n    \n    API Output:\n        score: liveness score, float32, range [-1,1]. 
Higher score (--> 1) \n        denotes spoofness.\n  '''\n  with tf.Session() as sess:\n    # load the facepad model\n    tf.saved_model.loader.load(sess, \n    \t\t\t[tf.saved_model.tag_constants.SERVING], \n    \t\t\texport_dir)\n    _input = tf.get_default_graph().get_tensor_by_name(inputname)\n    _output = tf.get_default_graph().get_tensor_by_name(outputname)\n    \n    score = sess.run(_output,feed_dict={_input : image})\n\n  return score\n\ndef evaluate_image(imfile,scfile):\n  with tf.Session() as sess:\n    # load the facepad model\n    tf.saved_model.loader.load(sess, \n    \t\t\t[tf.saved_model.tag_constants.SERVING], \n    \t\t\texport_dir)\n    image  = tf.get_default_graph().get_tensor_by_name(inputname)\n    scores = tf.get_default_graph().get_tensor_by_name(outputname)\n    \n    # get the image\n    frame = cv2.imread(imfile)\n    # detect faces in the frame. Detected face in faces with (x,y,w,h)\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    faces = faceCascade.detectMultiScale(\n      gray,\n      scaleFactor=1.1,\n      minNeighbors=5,\n      minSize=(256,256)\n      )\n    try:\n      faces = sorted(faces, key=lambda x:x[2]) \n      faces = [faces[0]] # only process the largest face\n    except:\n      print(\"No face detected!\")\n      sys.exit()\n     \n    for (x, y, w, h) in faces:\n       # crop face from frame\n       l = max(w,h)\n       face_raw = frame[y:y+l, x:x+l]\n       # run the facepad\n       sc = sess.run(scores,feed_dict={image : face_raw})\n       # save the score for video frames\n       scfile.write(\"%.3f\\n\" % sc)\n\n  return scfile\n\ndef evaluate_video(vdfile,scfile):\n  # get the video\n  video_capture = cv2.VideoCapture(vdfile)\n  bbox = np.loadtxt(vdfile[:-3]+'txt',dtype=np.str,delimiter=',')\n  bbox = bbox[:,1:]\n  (major_ver, _, _, _) = (cv2.__version__).split('.')\n  if int(major_ver) < 3:\n      totalframes = video_capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)\n  else:\n      totalframes = 
video_capture.get(cv2.CAP_PROP_FRAME_COUNT)\n        \n  with tf.Session() as sess:\n    # load the facepad model\n    tf.saved_model.loader.load(sess, \n    \t\t\t[tf.saved_model.tag_constants.SERVING], \n    \t\t\texport_dir)\n    image  = tf.get_default_graph().get_tensor_by_name(inputname)\n    scores = tf.get_default_graph().get_tensor_by_name(outputname)\n    \n    # container for last frame's detected faces\n    last_time_face = []\n    fr = 0\n    while(fr < totalframes and fr < bbox.shape[0]):\n      # get the frame from video\n      _, frame = video_capture.read()\n      x = int(bbox[fr,0])\n      y = int(bbox[fr,1])\n      w = int(bbox[fr,2]) - int(bbox[fr,0])\n      h = int(bbox[fr,3]) - int(bbox[fr,1])\n      fr += 1\n        \n      l = max(w,h)\n      dl = l * 1.5 / 2\n      x = int(x - dl)\n      y = int(y - 1.1*dl)\n      l = int(l + dl + dl)\n  \n      # crop face from frame\n      face_raw = frame[y:y+l, x:x+l]\n      #cv2.imshow('image',face_raw)\n      #cv2.waitKey(0)\n      #input()\n      # run the facepad\n      start = time.time()\n      sc = sess.run(scores,feed_dict={image : face_raw})\n      # save the score for video frames\n      scfile.write(\"%.3f\\n\" % sc)\n      print(sc)\n          \n          \n       \n  return scfile\n\ndef getopts(argv,opts):\n    while argv:  # While there are arguments left to parse...\n        if argv[0][0] == '-':  # Found a \"-name value\" pair.\n            if argv[0][1] == 'h':\n            \tprint('-h : help')\n            \tprint('-input : STRING, the path to the testing video')\n            \tprint('-isVideo : True/False, indicate if it is a video. 
Default as False.')\n        \tsys.exit()\n            opts[argv[0]] = argv[1]  # Add key and value to the dictionary.\n        argv = argv[1:]  # Reduce the argument list by copying it starting from index 1.\n    return opts\n\nif __name__ == '__main__':\n    myargs = {}\n    myargs = getopts(sys.argv,myargs)        \n    isVideo = myargs['-isVideo']\n    vdfile = myargs['-input']\n    if vdfile[-4] == '.':\n        scfile = open('./score/'+vdfile[-12:-3]+'score','w')\n    else:\n\tscfile = open('./score/'+vdfile[-12:-4]+'score','w')\n\n    print(vdfile)\n    print('Processing...')\n    \n    if isVideo == '1':\n    \tscfile = evaluate_video(vdfile,scfile)  \n    else:\n    \tscfile = evaluate_image(vdfile,scfile)  \n    \t\n    scfile.close()\n    print('Done!')\n"
  },
  {
    "path": "score/1_1_36_1.score",
    "content": "0.107\n0.089\n0.123\n0.131\n0.149\n0.147\n0.142\n0.120\n0.117\n0.131\n0.116\n0.136\n0.132\n0.114\n0.121\n0.120\n0.123\n0.132\n0.124\n0.125\n0.131\n0.124\n0.132\n0.124\n0.132\n0.127\n0.123\n0.123\n0.123\n0.113\n0.101\n0.102\n0.139\n0.164\n0.137\n0.128\n0.133\n0.124\n0.132\n0.137\n0.123\n0.114\n0.137\n0.114\n0.134\n0.155\n0.137\n0.128\n0.127\n0.126\n0.120\n0.138\n0.145\n0.137\n0.140\n0.130\n0.130\n0.112\n0.124\n0.101\n0.136\n0.086\n0.114\n0.117\n0.115\n0.107\n0.121\n0.114\n0.118\n0.125\n0.116\n0.149\n0.134\n0.123\n0.129\n0.138\n0.120\n0.105\n0.118\n0.124\n0.130\n0.127\n0.125\n0.113\n0.116\n0.116\n0.125\n0.119\n0.130\n0.113\n0.130\n0.097\n0.149\n0.123\n0.129\n0.129\n0.132\n0.123\n0.120\n0.123\n0.117\n0.127\n0.133\n0.134\n0.137\n0.136\n0.153\n0.144\n0.123\n0.128\n0.118\n0.133\n0.116\n0.114\n0.117\n0.112\n0.118\n0.117\n0.123\n0.112\n0.120\n0.087\n0.148\n0.123\n0.144\n0.135\n0.132\n0.147\n0.138\n0.136\n0.139\n0.125\n0.123\n0.118\n0.130\n0.131\n0.116\n0.142\n0.132\n0.134\n0.136\n0.138\n0.138\n0.138\n0.140\n0.128\n0.126\n0.131\n0.133\n0.125\n0.133\n"
  },
  {
    "path": "score/1_1_36_3.score",
    "content": "0.474\n0.469\n0.470\n0.471\n0.455\n0.466\n0.469\n0.483\n0.465\n0.466\n0.469\n0.468\n0.457\n0.459\n0.465\n0.463\n0.460\n0.464\n0.470\n0.465\n0.471\n0.468\n0.466\n0.473\n0.472\n0.480\n0.464\n0.473\n0.456\n0.465\n0.457\n0.448\n0.460\n0.453\n0.456\n0.463\n0.465\n0.454\n0.457\n0.464\n0.454\n0.443\n0.466\n0.470\n0.456\n0.469\n0.465\n0.469\n0.455\n0.475\n0.463\n0.469\n0.455\n0.463\n0.452\n0.473\n0.466\n0.462\n0.451\n0.463\n0.472\n0.468\n0.465\n0.475\n0.478\n0.466\n0.479\n0.460\n0.472\n0.459\n0.460\n0.471\n0.471\n0.474\n0.473\n0.464\n0.469\n0.472\n0.465\n0.475\n0.473\n0.478\n0.469\n0.470\n0.456\n0.479\n0.473\n0.476\n0.464\n0.484\n0.469\n0.464\n0.465\n0.467\n0.461\n0.465\n0.456\n0.468\n0.467\n0.464\n0.465\n0.473\n0.458\n0.465\n0.486\n0.473\n0.482\n0.470\n0.484\n0.487\n0.480\n0.465\n0.480\n0.468\n0.461\n0.471\n0.466\n0.464\n0.466\n0.457\n0.457\n0.469\n0.480\n0.473\n0.467\n0.466\n0.473\n0.472\n0.476\n0.459\n0.475\n0.469\n0.475\n0.464\n0.464\n0.474\n0.457\n0.479\n0.469\n0.471\n0.465\n0.473\n0.466\n0.478\n0.475\n0.468\n0.482\n0.475\n0.460\n0.452\n0.462\n"
  },
  {
    "path": "score/1_1_36_5.score",
    "content": "0.529\n0.509\n0.525\n0.530\n0.503\n0.512\n0.547\n0.547\n0.543\n0.537\n0.530\n0.536\n0.543\n0.536\n0.544\n0.529\n0.529\n0.534\n0.534\n0.542\n0.528\n0.532\n0.542\n0.550\n0.544\n0.550\n0.538\n0.524\n0.534\n0.544\n0.543\n0.514\n0.541\n0.529\n0.532\n0.533\n0.537\n0.539\n0.506\n0.511\n0.520\n0.522\n0.508\n0.526\n0.521\n0.522\n0.526\n0.535\n0.522\n0.533\n0.529\n0.524\n0.509\n0.520\n0.521\n0.524\n0.524\n0.531\n0.521\n0.541\n0.527\n0.547\n0.532\n0.524\n0.530\n0.533\n0.512\n0.513\n0.524\n0.518\n0.518\n0.513\n0.521\n0.510\n0.521\n0.510\n0.542\n0.520\n0.510\n0.529\n0.541\n0.547\n0.528\n0.531\n0.523\n0.532\n0.527\n0.530\n0.523\n0.531\n0.539\n0.536\n0.559\n0.550\n0.546\n0.538\n0.544\n0.539\n0.527\n0.541\n0.535\n0.531\n0.536\n0.536\n0.535\n0.522\n0.535\n0.540\n0.541\n0.545\n0.530\n0.532\n0.536\n0.525\n0.520\n0.522\n0.540\n0.532\n0.519\n0.497\n0.511\n0.503\n0.529\n0.517\n0.513\n0.522\n0.519\n0.514\n0.523\n0.520\n0.508\n0.516\n0.513\n0.512\n0.520\n0.521\n0.520\n0.505\n0.518\n0.513\n0.507\n0.518\n0.521\n0.520\n0.514\n0.522\n0.511\n0.525\n0.523\n0.514\n0.514\n"
  }
]